/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/debugfs.h>
#include <linux/skbuff.h>

#include "iwl-shared.h"
#include "iwl-commands.h"

/* This file includes the declarations that are exported from the transport
 * layer */

struct iwl_priv;
struct iwl_shared;

#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
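
/*
 * The low 4 bits of the 802.11 sequence-control field carry the fragment
 * number, so the sequence number (SN) lives in bits 4..15; SEQ_TO_SN and
 * SN_TO_SEQ only mask and shift between the two representations.  A minimal
 * usage sketch (skb and hdr are assumed caller-side variables, not part of
 * this header):
 *
 *	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 *	u16 sn = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
 *	u16 seq_ctrl = SN_TO_SEQ(sn);
 *
 * The reverse conversion rebuilds only the SN part; the fragment bits of
 * seq_ctrl come back as zero.
 */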

enum {
	CMD_SYNC = 0,
	CMD_ASYNC = BIT(0),
	CMD_WANT_SKB = BIT(1),
	CMD_ON_DEMAND = BIT(2),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	struct iwl_cmd_header hdr;	/* uCode API */
	u8 payload[DEF_CMD_PAYLOAD_SIZE];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

#define IWL_MAX_CMD_TFDS	2

enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 * @data: array of chunks that compose the data of the host command
 * @reply_page: pointer to the page that holds the response to the host command
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers) - valid for SYNC mode only
 * @flags: can be CMD_*; note that CMD_WANT_SKB is incompatible with CMD_ASYNC
 * @len: array of the lengths of the chunks in @data
 * @dataflags: IWL_HCMD_DFL_* flags for each chunk in @data
 * @id: id of the host command
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TFDS];
	unsigned long reply_page;
	int handler_status;

	u32 flags;
	u16 len[IWL_MAX_CMD_TFDS];
	u8 dataflags[IWL_MAX_CMD_TFDS];
	u8 id;
};
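
/*
 * Typical construction of a host command, shown here only as a hedged
 * sketch: REPLY_EXAMPLE, struct example_cmd and the local variables are
 * illustrative placeholders, not symbols provided by this header.
 *
 *	struct example_cmd payload = { ... };
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_EXAMPLE,
 *		.flags = CMD_SYNC,
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *		.dataflags = { 0, },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &cmd);
 *
 * A chunk that is too large to be copied into the command buffer can be
 * mapped in place instead by setting IWL_HCMD_DFL_NOCOPY in the matching
 * dataflags slot.
 */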

/**
 * struct iwl_trans_ops - transport specific operations
 * @alloc: allocates the metadata (not the queues themselves)
 * @request_irq: requests IRQ - will be called before the FW load in probe flow
 * @start_device: allocates and inits all the resources for the transport
 *                layer.
 * @prepare_card_hw: claim ownership of the HW. Will be called during
 *                   probe.
 * @tx_start: starts and configures all the Tx FIFOs - usually done once the fw
 *            is alive.
 * @wake_any_queue: wake all the queues of a specific context IWL_RXON_CTX_*
 * @stop_device: stops the whole device (embedded CPU put to reset)
 * @send_cmd: send a host command
 * @tx: send an skb
 * @reclaim: free packets up to ssn. Returns a list of the freed packets.
 * @tx_agg_alloc: allocate resources for a TX BA session
 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
 *                ready and a successful ADDBA response has been received.
 * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
 * @kick_nic: remove the RESET from the embedded CPU and let it run
 * @free: release all the resources of the transport layer itself, such as
 *        the irq, tasklets, etc.
 * @stop_queue: stop a specific queue
 * @check_stuck_queue: check if a specific queue is stuck
 * @wait_tx_queue_empty: wait until all tx queues are empty
 * @dbgfs_register: add the dbgfs files under this directory. Files will be
 *	automatically deleted.
 * @suspend: stop the device unless WoWLAN is configured
 * @resume: resume activity of the device
 */
struct iwl_trans_ops {

	struct iwl_trans *(*alloc)(struct iwl_shared *shrd);
	int (*request_irq)(struct iwl_trans *iwl_trans);
	int (*start_device)(struct iwl_trans *trans);
	int (*prepare_card_hw)(struct iwl_trans *trans);
	void (*stop_device)(struct iwl_trans *trans);
	void (*tx_start)(struct iwl_trans *trans);

	void (*wake_any_queue)(struct iwl_trans *trans,
			       enum iwl_rxon_context_id ctx,
			       const char *msg);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
		u8 sta_id, u8 tid);
	int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
			int txq_id, int ssn, u32 status,
			struct sk_buff_head *skbs);

	int (*tx_agg_disable)(struct iwl_trans *trans,
			      int sta_id, int tid);
	int (*tx_agg_alloc)(struct iwl_trans *trans,
			    int sta_id, int tid);
	void (*tx_agg_setup)(struct iwl_trans *trans,
			     enum iwl_rxon_context_id ctx, int sta_id, int tid,
			     int frame_limit, u16 ssn);

	void (*kick_nic)(struct iwl_trans *trans);

	void (*free)(struct iwl_trans *trans);

	void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);

	int (*dbgfs_register)(struct iwl_trans *trans, struct dentry *dir);
	int (*check_stuck_queue)(struct iwl_trans *trans, int q);
	int (*wait_tx_queue_empty)(struct iwl_trans *trans);
#ifdef CONFIG_PM_SLEEP
	int (*suspend)(struct iwl_trans *trans);
	int (*resume)(struct iwl_trans *trans);
#endif
};
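
/*
 * Each bus back-end fills one iwl_trans_ops table (trans_ops_pcie, declared
 * at the end of this header, is the PCIe one) and the upper layer only ever
 * calls through the iwl_trans_* wrappers below.  A partial, illustrative
 * sketch - the iwl_trans_pcie_* handler names are assumptions made for the
 * example, not symbols guaranteed by this header:
 *
 *	const struct iwl_trans_ops trans_ops_pcie = {
 *		.alloc = iwl_trans_pcie_alloc,
 *		.request_irq = iwl_trans_pcie_request_irq,
 *		.start_device = iwl_trans_pcie_start_device,
 *		.send_cmd = iwl_trans_pcie_send_cmd,
 *		.tx = iwl_trans_pcie_tx,
 *		.free = iwl_trans_pcie_free,
 *		...
 *	};
 */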

/* one for each uCode image (inst/data, boot/init/runtime) */
struct fw_desc {
	dma_addr_t p_addr;	/* hardware address */
	void *v_addr;		/* software address */
	u32 len;		/* size in bytes */
};

struct fw_img {
	struct fw_desc code;	/* firmware code image */
	struct fw_desc data;	/* firmware data image */
};

/* Opaque calibration results */
struct iwl_calib_result {
	struct list_head list;
	size_t cmd_len;
	struct iwl_calib_hdr hdr;
	/* data follows */
};
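
/*
 * "data follows" means each result is one allocation, with the calibration
 * payload stored directly after the structure.  A hedged layout sketch only
 * (cmd points at a received struct iwl_calib_hdr and data_len is the payload
 * length past that header; neither is defined by this file, and the real
 * implementation may differ):
 *
 *	struct iwl_calib_result *res;
 *
 *	res = kzalloc(sizeof(*res) + data_len, GFP_KERNEL);
 *	if (!res)
 *		return -ENOMEM;
 *	res->cmd_len = sizeof(res->hdr) + data_len;
 *	memcpy(&res->hdr, cmd, res->cmd_len);
 *	list_add_tail(&res->list, &trans->calib_results);
 */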

/**
 * struct iwl_trans - transport common data
 * @ops: pointer to iwl_trans_ops
 * @shrd: pointer to iwl_shared which holds shared data from the upper layer
 * @hcmd_lock: protects HCMD
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_rt: run time ucode image
 * @ucode_init: init ucode image
 * @ucode_wowlan: wake on wireless ucode image (optional)
 * @nvm_device_type: indicates OTP or eeprom
 * @calib_results: list head for init calibration results
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_shared *shrd;
	spinlock_t hcmd_lock;

	u8 ucode_write_complete;	/* the image write is complete */
	struct fw_img ucode_rt;
	struct fw_img ucode_init;
	struct fw_img ucode_wowlan;

	/* eeprom related variables */
	int    nvm_device_type;

	/* init calibration results */
	struct list_head calib_results;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
};
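
/*
 * trans_specific is a zero-length trailing member: a back-end allocates
 * struct iwl_trans plus its own private state in a single block and keeps
 * that state at trans->trans_specific.  A hedged sketch (struct
 * iwl_trans_pcie stands in for whatever private struct the back-end uses):
 *
 *	struct iwl_trans *trans;
 *	struct iwl_trans_pcie *trans_pcie;
 *
 *	trans = kzalloc(sizeof(struct iwl_trans) +
 *			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
 *	if (!trans)
 *		return NULL;
 *	trans_pcie = (struct iwl_trans_pcie *)trans->trans_specific;
 */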

static inline int iwl_trans_request_irq(struct iwl_trans *trans)
{
	return trans->ops->request_irq(trans);
}

static inline int iwl_trans_start_device(struct iwl_trans *trans)
{
	return trans->ops->start_device(trans);
}

static inline int iwl_trans_prepare_card_hw(struct iwl_trans *trans)
{
	return trans->ops->prepare_card_hw(trans);
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	trans->ops->stop_device(trans);
}

static inline void iwl_trans_tx_start(struct iwl_trans *trans)
{
	trans->ops->tx_start(trans);
}

static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
					    enum iwl_rxon_context_id ctx,
					    const char *msg)
{
	trans->ops->wake_any_queue(trans, ctx, msg);
}

static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
				struct iwl_host_cmd *cmd)
{
	return trans->ops->send_cmd(trans, cmd);
}

int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
			   u32 flags, u16 len, const void *data);
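
/*
 * iwl_trans_send_cmd_pdu() is the convenience path for a command with a
 * single, copied payload.  A hedged sketch (REPLY_EXAMPLE and struct
 * example_cmd are placeholders, as in the iwl_host_cmd example above):
 *
 *	struct example_cmd payload = { ... };
 *	int ret = iwl_trans_send_cmd_pdu(trans, REPLY_EXAMPLE, CMD_SYNC,
 *					 sizeof(payload), &payload);
 */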

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
		struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
		u8 sta_id, u8 tid)
{
	return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
}

static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
				 int tid, int txq_id, int ssn, u32 status,
				 struct sk_buff_head *skbs)
{
	return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn,
				   status, skbs);
}

static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
					    int sta_id, int tid)
{
	return trans->ops->tx_agg_disable(trans, sta_id, tid);
}

static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
					 int sta_id, int tid)
{
	return trans->ops->tx_agg_alloc(trans, sta_id, tid);
}

static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
					   enum iwl_rxon_context_id ctx,
					   int sta_id, int tid,
					   int frame_limit, u16 ssn)
{
	trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
}

static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
{
	trans->ops->kick_nic(trans);
}

static inline void iwl_trans_free(struct iwl_trans *trans)
{
	trans->ops->free(trans);
}

static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q,
					const char *msg)
{
	trans->ops->stop_queue(trans, q, msg);
}

static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
	return trans->ops->wait_tx_queue_empty(trans);
}

static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
{
	return trans->ops->check_stuck_queue(trans, q);
}

static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
					    struct dentry *dir)
{
	return trans->ops->dbgfs_register(trans, dir);
}

#ifdef CONFIG_PM_SLEEP
static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	return trans->ops->suspend(trans);
}

static inline int iwl_trans_resume(struct iwl_trans *trans)
{
	return trans->ops->resume(trans);
}
#endif

/*****************************************************
 * Transport layer implementations
 ******************************************************/
extern const struct iwl_trans_ops trans_ops_pcie;

int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
		      const void *data, size_t len);
void iwl_dealloc_ucode(struct iwl_trans *trans);

int iwl_send_calib_results(struct iwl_trans *trans);
int iwl_calib_set(struct iwl_trans *trans,
		  const struct iwl_calib_hdr *cmd, int len);
void iwl_calib_free_results(struct iwl_trans *trans);

#endif /* __iwl_trans_h__ */