/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#ifndef __RTW_HCI_H__
#define __RTW_HCI_H__

/* ops for PCI, USB and SDIO */
struct rtw_hci_ops {
	int (*tx_write)(struct rtw_dev *rtwdev,
			struct rtw_tx_pkt_info *pkt_info,
			struct sk_buff *skb);
	void (*tx_kick_off)(struct rtw_dev *rtwdev);
	void (*flush_queues)(struct rtw_dev *rtwdev, u32 queues, bool drop);
	int (*setup)(struct rtw_dev *rtwdev);
	int (*start)(struct rtw_dev *rtwdev);
	void (*stop)(struct rtw_dev *rtwdev);
	void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
	void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
	void (*interface_cfg)(struct rtw_dev *rtwdev);
	void (*dynamic_rx_agg)(struct rtw_dev *rtwdev, bool enable);
	void (*write_firmware_page)(struct rtw_dev *rtwdev, u32 page,
				    const u8 *data, u32 size);

	int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
	int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);

	u8 (*read8)(struct rtw_dev *rtwdev, u32 addr);
	u16 (*read16)(struct rtw_dev *rtwdev, u32 addr);
	u32 (*read32)(struct rtw_dev *rtwdev, u32 addr);
	void (*write8)(struct rtw_dev *rtwdev, u32 addr, u8 val);
	void (*write16)(struct rtw_dev *rtwdev, u32 addr, u16 val);
	void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val);
};

static inline int rtw_hci_tx_write(struct rtw_dev *rtwdev,
				   struct rtw_tx_pkt_info *pkt_info,
				   struct sk_buff *skb)
{
	return rtwdev->hci.ops->tx_write(rtwdev, pkt_info, skb);
}

static inline void rtw_hci_tx_kick_off(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.ops->tx_kick_off(rtwdev);
}

static inline int rtw_hci_setup(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.ops->setup(rtwdev);
}

static inline int rtw_hci_start(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.ops->start(rtwdev);
}

static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
{
	rtwdev->hci.ops->stop(rtwdev);
}

static inline void rtw_hci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	rtwdev->hci.ops->deep_ps(rtwdev, enter);
}

static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	rtwdev->hci.ops->link_ps(rtwdev, enter);
}

static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev)
{
	rtwdev->hci.ops->interface_cfg(rtwdev);
}

static inline void rtw_hci_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
{
	if (rtwdev->hci.ops->dynamic_rx_agg)
		rtwdev->hci.ops->dynamic_rx_agg(rtwdev, enable);
}

static inline void rtw_hci_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
					       const u8 *data, u32 size)
{
	rtwdev->hci.ops->write_firmware_page(rtwdev, page, data, size);
}

static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	return rtwdev->hci.ops->write_data_rsvd_page(rtwdev, buf, size);
}

static inline int
rtw_hci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	return rtwdev->hci.ops->write_data_h2c(rtwdev, buf, size);
}

static inline u8 rtw_read8(struct rtw_dev *rtwdev, u32 addr)
{
	return rtwdev->hci.ops->read8(rtwdev, addr);
}

static inline u16 rtw_read16(struct rtw_dev *rtwdev, u32 addr)
{
	return rtwdev->hci.ops->read16(rtwdev, addr);
}

static inline u32 rtw_read32(struct rtw_dev *rtwdev, u32 addr)
{
	return rtwdev->hci.ops->read32(rtwdev, addr);
}
static inline void rtw_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	rtwdev->hci.ops->write8(rtwdev, addr, val);
}

static inline void rtw_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	rtwdev->hci.ops->write16(rtwdev, addr, val);
}

static inline void rtw_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	rtwdev->hci.ops->write32(rtwdev, addr, val);
}

static inline void rtw_write8_set(struct rtw_dev *rtwdev, u32 addr, u8 bit)
{
	u8 val;

	val = rtw_read8(rtwdev, addr);
	rtw_write8(rtwdev, addr, val | bit);
}

static inline void rtw_write16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
	u16 val;

	val = rtw_read16(rtwdev, addr);
	rtw_write16(rtwdev, addr, val | bit);
}

static inline void rtw_write32_set(struct rtw_dev *rtwdev, u32 addr, u32 bit)
{
	u32 val;

	val = rtw_read32(rtwdev, addr);
	rtw_write32(rtwdev, addr, val | bit);
}

static inline void rtw_write8_clr(struct rtw_dev *rtwdev, u32 addr, u8 bit)
{
	u8 val;

	val = rtw_read8(rtwdev, addr);
	rtw_write8(rtwdev, addr, val & ~bit);
}

static inline void rtw_write16_clr(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
	u16 val;

	val = rtw_read16(rtwdev, addr);
	rtw_write16(rtwdev, addr, val & ~bit);
}

static inline void rtw_write32_clr(struct rtw_dev *rtwdev, u32 addr, u32 bit)
{
	u32 val;

	val = rtw_read32(rtwdev, addr);
	rtw_write32(rtwdev, addr, val & ~bit);
}

static inline u32
rtw_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
	    u32 addr, u32 mask)
{
	u32 val;

	lockdep_assert_held(&rtwdev->mutex);

	val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask);

	return val;
}

static inline void
rtw_write_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
	     u32 addr, u32 mask, u32 data)
{
	lockdep_assert_held(&rtwdev->mutex);

	rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data);
}

static inline u32
rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 ret;

	orig = rtw_read32(rtwdev, addr);
	ret = (orig & mask) >> shift;

	return ret;
}

static inline u16
rtw_read16_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 ret;

	orig = rtw_read16(rtwdev, addr);
	ret = (orig & mask) >> shift;

	return ret;
}

static inline u8
rtw_read8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 ret;

	orig = rtw_read8(rtwdev, addr);
	ret = (orig & mask) >> shift;

	return ret;
}

static inline void
rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 set;

	WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr);

	orig = rtw_read32(rtwdev, addr);
	set = (orig & ~mask) | ((data << shift) & mask);
	rtw_write32(rtwdev, addr, set);
}

static inline void
rtw_write8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u8 data)
{
	u32 shift;
	u8 orig, set;

	mask &= 0xff;
	shift = __ffs(mask);

	orig = rtw_read8(rtwdev, addr);
	set = (orig & ~mask) | ((data << shift) & mask);
	rtw_write8(rtwdev, addr, set);
}
static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.type;
}

static inline void rtw_hci_flush_queues(struct rtw_dev *rtwdev, u32 queues,
					bool drop)
{
	if (rtwdev->hci.ops->flush_queues)
		rtwdev->hci.ops->flush_queues(rtwdev, queues, drop);
}

static inline void rtw_hci_flush_all_queues(struct rtw_dev *rtwdev, bool drop)
{
	if (rtwdev->hci.ops->flush_queues)
		rtwdev->hci.ops->flush_queues(rtwdev,
					      BIT(rtwdev->hw->queues) - 1,
					      drop);
}

#endif
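/*
 * Editor's note, illustration only (not part of the upstream header): a bus
 * backend is expected to provide one rtw_hci_ops table and point
 * rtwdev->hci.ops at it during probe, after which the rtw_hci_*() and
 * rtw_readX()/rtw_writeX() wrappers above dispatch through it.  A minimal
 * sketch follows; the my_pci_* callback names are hypothetical placeholders
 * and only a subset of the callbacks is shown.
 *
 *	static struct rtw_hci_ops my_pci_ops = {
 *		.tx_write	= my_pci_tx_write,
 *		.tx_kick_off	= my_pci_tx_kick_off,
 *		.setup		= my_pci_setup,
 *		.start		= my_pci_start,
 *		.stop		= my_pci_stop,
 *		.read8		= my_pci_read8,
 *		.write8		= my_pci_write8,
 *		...
 *	};
 *
 *	rtwdev->hci.ops = &my_pci_ops;
 *	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
 */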