xref: /src/sys/dev/mrsas/mrsas.c (revision 93122ead724b3ba0ccdaedadcd371ec53f9a9844)
1 /*
2  * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3  * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4  * Support: freebsdraid@avagotech.com
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  * 1. Redistributions of source code must retain the above copyright notice,
11  * this list of conditions and the following disclaimer. 2. Redistributions
12  * in binary form must reproduce the above copyright notice, this list of
13  * conditions and the following disclaimer in the documentation and/or other
14  * materials provided with the distribution. 3. Neither the name of the
15  * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16  * promote products derived from this software without specific prior written
17  * permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing
33  * official policies,either expressed or implied, of the FreeBSD Project.
34  *
35  * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36  * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37  *
38  */
39 
40 #include <sys/cdefs.h>
41 #include <dev/mrsas/mrsas.h>
42 #include <dev/mrsas/mrsas_ioctl.h>
43 
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46 
47 #include <sys/sysctl.h>
48 #include <sys/types.h>
49 #include <sys/sysent.h>
50 #include <sys/kthread.h>
51 #include <sys/taskqueue.h>
52 #include <sys/smp.h>
53 #include <sys/endian.h>
54 
55 /*
56  * Function prototypes
57  */
58 static d_open_t mrsas_open;
59 static d_close_t mrsas_close;
60 static d_ioctl_t mrsas_ioctl;
61 static d_poll_t mrsas_poll;
62 
63 static void mrsas_ich_startup(void *arg);
64 static struct mrsas_mgmt_info mrsas_mgmt_info;
65 static struct mrsas_ident *mrsas_find_ident(device_t);
66 static int mrsas_setup_msix(struct mrsas_softc *sc);
67 static int mrsas_allocate_msix(struct mrsas_softc *sc);
68 static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
69 static void mrsas_flush_cache(struct mrsas_softc *sc);
70 static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
71 static void mrsas_ocr_thread(void *arg);
72 static int mrsas_get_map_info(struct mrsas_softc *sc);
73 static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
74 static int mrsas_sync_map_info(struct mrsas_softc *sc);
75 static int mrsas_get_pd_list(struct mrsas_softc *sc);
76 static int mrsas_get_ld_list(struct mrsas_softc *sc);
77 static int mrsas_setup_irq(struct mrsas_softc *sc);
78 static int mrsas_alloc_mem(struct mrsas_softc *sc);
79 static int mrsas_init_fw(struct mrsas_softc *sc);
80 static int mrsas_setup_raidmap(struct mrsas_softc *sc);
81 static void megasas_setup_jbod_map(struct mrsas_softc *sc);
82 static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
83 static int mrsas_clear_intr(struct mrsas_softc *sc);
84 static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
85 static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
86 static int
87 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
88     struct mrsas_mfi_cmd *cmd_to_abort);
89 static void
90 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id);
91 static struct mrsas_softc *
92 mrsas_get_softc_instance(struct cdev *dev,
93     u_long cmd, caddr_t arg);
94 u_int32_t
95 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset);
96 u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
97 u_int8_t
98 mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
99     struct mrsas_mfi_cmd *mfi_cmd);
100 void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
101 int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
102 int	mrsas_init_adapter(struct mrsas_softc *sc);
103 int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
104 int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
105 int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
106 int	mrsas_ioc_init(struct mrsas_softc *sc);
107 int	mrsas_bus_scan(struct mrsas_softc *sc);
108 int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109 int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110 int	mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
111 int	mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
112 int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
113 int mrsas_reset_targets(struct mrsas_softc *sc);
114 int
115 mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
116     struct mrsas_mfi_cmd *cmd);
117 int
118 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
119     int size);
120 void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
121 void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
122 void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
123 void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
124 void	mrsas_disable_intr(struct mrsas_softc *sc);
125 void	mrsas_enable_intr(struct mrsas_softc *sc);
126 void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
127 void	mrsas_free_mem(struct mrsas_softc *sc);
128 void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
129 void	mrsas_isr(void *arg);
130 void	mrsas_teardown_intr(struct mrsas_softc *sc);
131 void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
132 void	mrsas_kill_hba(struct mrsas_softc *sc);
133 void	mrsas_aen_handler(struct mrsas_softc *sc);
134 void
135 mrsas_write_reg(struct mrsas_softc *sc, int offset,
136     u_int32_t value);
137 void
138 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
139     u_int32_t req_desc_hi);
140 void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
141 void
142 mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
143     struct mrsas_mfi_cmd *cmd, u_int8_t status);
144 struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
145 
146 MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
147         (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
148 
149 extern int mrsas_cam_attach(struct mrsas_softc *sc);
150 extern void mrsas_cam_detach(struct mrsas_softc *sc);
151 extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
152 extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
153 extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
154 extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
155 extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
156 extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
157 extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
158 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
159 extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
160 extern void mrsas_xpt_release(struct mrsas_softc *sc);
161 extern MRSAS_REQUEST_DESCRIPTOR_UNION *
162 mrsas_get_request_desc(struct mrsas_softc *sc,
163     u_int16_t index);
164 extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
165 static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
166 static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
167 void	mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
168 
169 void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd,
170 	union ccb *ccb_ptr, u_int8_t status, u_int8_t extStatus,
171 	u_int32_t data_length, u_int8_t *sense);
172 void
173 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
174     u_int32_t req_desc_hi);
175 
176 SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
177     "MRSAS Driver Parameters");
178 
179 /*
180  * PCI device struct and table
181  *
182  */
/*
 * One supported-controller entry, matched in mrsas_find_ident() against the
 * IDs read from PCI configuration space.
 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* subsystem vendor ID; 0xffff acts as wildcard */
	uint16_t subdevice;	/* subsystem device ID; 0xffff acts as wildcard */
	const char *desc;	/* human-readable controller name for probe */
}	MRSAS_CTLR_ID;
190 
/*
 * Table of controllers this driver attaches to, terminated by an all-zero
 * sentinel.  Subvendor/subdevice are 0xffff (wildcard) for every entry, so
 * matching is effectively on vendor/device only (see mrsas_find_ident()).
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0x1000, MRSAS_VENTURA, 0xffff, 0xffff, "AVAGO Ventura SAS Controller"},
	{0x1000, MRSAS_CRUSADER, 0xffff, 0xffff, "AVAGO Crusader SAS Controller"},
	{0x1000, MRSAS_HARPOON, 0xffff, 0xffff, "AVAGO Harpoon SAS Controller"},
	{0x1000, MRSAS_TOMCAT, 0xffff, 0xffff, "AVAGO Tomcat SAS Controller"},
	{0x1000, MRSAS_VENTURA_4PORT, 0xffff, 0xffff, "AVAGO Ventura_4Port SAS Controller"},
	{0x1000, MRSAS_CRUSADER_4PORT, 0xffff, 0xffff, "AVAGO Crusader_4Port SAS Controller"},
	{0x1000, MRSAS_AERO_10E0, 0xffff, 0xffff, "BROADCOM AERO-10E0 SAS Controller"},
	{0x1000, MRSAS_AERO_10E1, 0xffff, 0xffff, "BROADCOM AERO-10E1 SAS Controller"},
	{0x1000, MRSAS_AERO_10E2, 0xffff, 0xffff, "BROADCOM AERO-10E2 SAS Controller"},
	{0x1000, MRSAS_AERO_10E3, 0xffff, 0xffff, "BROADCOM AERO-10E3 SAS Controller"},
	{0x1000, MRSAS_AERO_10E4, 0xffff, 0xffff, "BROADCOM AERO-10E4 SAS Controller"},
	{0x1000, MRSAS_AERO_10E5, 0xffff, 0xffff, "BROADCOM AERO-10E5 SAS Controller"},
	{0x1000, MRSAS_AERO_10E6, 0xffff, 0xffff, "BROADCOM AERO-10E6 SAS Controller"},
	{0x1000, MRSAS_AERO_10E7, 0xffff, 0xffff, "BROADCOM AERO-10E7 SAS Controller"},
	{0, 0, 0, 0, NULL}
};
215 
216 /*
217  * Character device entry points
218  *
219  */
/*
 * cdevsw for the management character device: userland management tools
 * reach the controller through these open/close/ioctl/poll entry points.
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
228 
229 MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
230 
/*
 * mrsas_open:	character-device open entry point.
 *
 * No per-open state is kept, so opening always succeeds.
 */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{

	return (0);
}
237 
/*
 * mrsas_close:	character-device close entry point.
 *
 * Nothing was allocated at open time, so there is nothing to release.
 */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{

	return (0);
}
244 
245 u_int32_t
mrsas_read_reg_with_retries(struct mrsas_softc * sc,int offset)246 mrsas_read_reg_with_retries(struct mrsas_softc *sc, int offset)
247 {
248 	u_int32_t i = 0, ret_val;
249 
250 	if (sc->is_aero) {
251 		do {
252 			ret_val = mrsas_read_reg(sc, offset);
253 			i++;
254 		} while(ret_val == 0 && i < 3);
255 	} else
256 		ret_val = mrsas_read_reg(sc, offset);
257 
258 	return ret_val;
259 }
260 
261 /*
262  * Register Read/Write Functions
263  *
264  */
265 void
mrsas_write_reg(struct mrsas_softc * sc,int offset,u_int32_t value)266 mrsas_write_reg(struct mrsas_softc *sc, int offset,
267     u_int32_t value)
268 {
269 	bus_space_tag_t bus_tag = sc->bus_tag;
270 	bus_space_handle_t bus_handle = sc->bus_handle;
271 
272 	bus_space_write_4(bus_tag, bus_handle, offset, value);
273 }
274 
275 u_int32_t
mrsas_read_reg(struct mrsas_softc * sc,int offset)276 mrsas_read_reg(struct mrsas_softc *sc, int offset)
277 {
278 	bus_space_tag_t bus_tag = sc->bus_tag;
279 	bus_space_handle_t bus_handle = sc->bus_handle;
280 
281 	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
282 }
283 
284 /*
285  * Interrupt Disable/Enable/Clear Functions
286  *
287  */
288 void
mrsas_disable_intr(struct mrsas_softc * sc)289 mrsas_disable_intr(struct mrsas_softc *sc)
290 {
291 	u_int32_t mask = 0xFFFFFFFF;
292 
293 	sc->mask_interrupts = 1;
294 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
295 	/* Dummy read to force pci flush */
296 	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
297 }
298 
299 void
mrsas_enable_intr(struct mrsas_softc * sc)300 mrsas_enable_intr(struct mrsas_softc *sc)
301 {
302 	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
303 
304 	sc->mask_interrupts = 0;
305 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
306 	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
307 
308 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
309 	(void)mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
310 }
311 
312 static int
mrsas_clear_intr(struct mrsas_softc * sc)313 mrsas_clear_intr(struct mrsas_softc *sc)
314 {
315 	u_int32_t status;
316 
317 	/* Read received interrupt */
318 	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_intr_status));
319 
320 	/* Not our interrupt, so just return */
321 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
322 		return (0);
323 
324 	/* We got a reply interrupt */
325 	return (1);
326 }
327 
328 /*
329  * PCI Support Functions
330  *
331  */
332 static struct mrsas_ident *
mrsas_find_ident(device_t dev)333 mrsas_find_ident(device_t dev)
334 {
335 	struct mrsas_ident *pci_device;
336 
337 	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
338 		if ((pci_device->vendor == pci_get_vendor(dev)) &&
339 		    (pci_device->device == pci_get_device(dev)) &&
340 		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
341 		    (pci_device->subvendor == 0xffff)) &&
342 		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
343 		    (pci_device->subdevice == 0xffff)))
344 			return (pci_device);
345 	}
346 	return (NULL);
347 }
348 
349 static int
mrsas_probe(device_t dev)350 mrsas_probe(device_t dev)
351 {
352 	struct mrsas_ident *id;
353 
354 	if ((id = mrsas_find_ident(dev)) != NULL) {
355 		device_set_desc(dev, id->desc);
356 		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
357 		return (-30);
358 	}
359 	return (ENXIO);
360 }
361 
362 /*
363  * mrsas_setup_sysctl:	setup sysctl values for mrsas
364  * input:				Adapter instance soft state
365  *
366  * Setup sysctl entries for mrsas driver.
367  */
/*
 * mrsas_setup_sysctl:	setup sysctl values for mrsas
 * input:		Adapter instance soft state
 *
 * Registers the per-unit sysctl tree (hw.mrsas.<unit>) and all tunable/
 * read-only nodes under it.  If newbus did not give us a context/tree,
 * a private context is created as a fallback.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the device's own sysctl context/tree created by newbus. */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* Fall back to a driver-private context rooted at hw.mrsas.<unit>. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");
	/*
	 * NOTE(review): the next two node names contain spaces
	 * ("stream detection", "SGE holes"); unusual for sysctl names, but
	 * kept as-is since they are part of the driver's visible interface.
	 */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "stream detection", CTLFLAG_RW,
		&sc->drv_stream_detection, 0,
		"Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_count", CTLFLAG_RD,
	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "SGE holes", CTLFLAG_RD,
	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
}
449 
450 /*
451  * mrsas_get_tunables:	get tunable parameters.
452  * input:				Adapter instance soft state
453  *
454  * Get tunable parameters. This will help to debug driver at boot time.
455  */
/*
 * mrsas_get_tunables:	get tunable parameters.
 * input:		Adapter instance soft state
 *
 * Initializes the softc's tunable fields to their defaults, then
 * overrides them from loader tunables.  This helps debug the driver at
 * boot time, before sysctl nodes exist.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug =
		(MRSAS_FAULT | MRSAS_OCR | MRSAS_INFO | MRSAS_TRACE | MRSAS_AEN);
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;
	sc->block_sync_cache = 0;
	sc->drv_stream_detection = 1;

	/*
	 * Grab the global debug-level tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the global load-balance pending-commands tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables (override the global setting). */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
486 
487 /*
488  * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
489  * Used to get sequence number at driver load time.
490  * input:		Adapter soft state
491  *
492  * Allocates DMAable memory for the event log info internal command.
493  */
494 int
mrsas_alloc_evt_log_info_cmd(struct mrsas_softc * sc)495 mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
496 {
497 	int el_info_size;
498 
499 	/* Allocate get event log info command */
500 	el_info_size = sizeof(struct mrsas_evt_log_info);
501 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
502 	    1, 0,
503 	    BUS_SPACE_MAXADDR_32BIT,
504 	    BUS_SPACE_MAXADDR,
505 	    NULL, NULL,
506 	    el_info_size,
507 	    1,
508 	    el_info_size,
509 	    BUS_DMA_ALLOCNOW,
510 	    NULL, NULL,
511 	    &sc->el_info_tag)) {
512 		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
513 		return (ENOMEM);
514 	}
515 	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
516 	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
517 		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
518 		return (ENOMEM);
519 	}
520 	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
521 	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
522 	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
523 		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
524 		return (ENOMEM);
525 	}
526 	memset(sc->el_info_mem, 0, el_info_size);
527 	return (0);
528 }
529 
530 /*
531  * mrsas_free_evt_info_cmd:	Free memory for Event log info command
532  * input:					Adapter soft state
533  *
534  * Deallocates memory for the event log info internal command.
535  */
/*
 * mrsas_free_evt_log_info_cmd:	Free memory for Event log info command
 * input:			Adapter soft state
 *
 * Deallocates memory for the event log info internal command.  Teardown
 * order matters: unload the DMA map first, then free the DMA memory,
 * then destroy the tag.
 */
void
mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
{
	/* A non-zero phys addr means the map was successfully loaded. */
	if (sc->el_info_phys_addr)
		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
	if (sc->el_info_mem != NULL)
		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
	if (sc->el_info_tag != NULL)
		bus_dma_tag_destroy(sc->el_info_tag);
}
546 
547 /*
548  *  mrsas_get_seq_num:	Get latest event sequence number
549  *  @sc:				Adapter soft state
550  *  @eli:				Firmware event log sequence number information.
551  *
552  * Firmware maintains a log of all events in a non-volatile area.
553  * Driver get the sequence number using DCMD
554  * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
555  */
556 
/*
 *  mrsas_get_seq_num:	Get latest event sequence number
 *  @sc:		Adapter soft state
 *  @eli:		Firmware event log sequence number information.
 *
 * Firmware maintains a log of all events in a non-volatile area.
 * Driver gets the sequence number using DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the blocked-cmd
 * return code.  On timeout the MFI command and DMA buffer are deliberately
 * NOT released (firmware may still own them); instead an OCR is requested
 * via do_timedout_reset.
 */
static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer the firmware will fill with the event log info. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the GET_INFO DCMD; multi-byte fields are little-endian. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_log_info));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(sc->el_info_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_log_info));

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/* On timeout (do_ocr still set) schedule a reset; otherwise release. */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}
610 
611 /*
612  *  mrsas_register_aen:		Register for asynchronous event notification
613  *  @sc:			Adapter soft state
614  *  @seq_num:			Starting sequence number
615  *  @class_locale:		Class of the event
616  *
617  *  This function subscribes for events beyond the @seq_num
618  *  and type @class_locale.
619  *
620  */
/*
 *  mrsas_register_aen:		Register for asynchronous event notification
 *  @sc:			Adapter soft state
 *  @seq_num:			Starting sequence number
 *  @class_locale_word:		Class/locale of the event, packed as a word
 *
 *  This function subscribes for events beyond the @seq_num
 *  and type @class_locale.
 *
 *  Returns 0 on success (including "already subscribed"), ENOMEM when no
 *  MFI command is available, the abort return code if aborting the previous
 *  AEN failed, or 1 if issuing the new AEN DCMD failed.
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {
		/* mbox.w[1] of the pending AEN holds its class/locale word. */
		prev_aen.word = le32toh(sc->aen_cmd->frame->dcmd.mbox.w[1]);

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_evt_detail));
	dcmd->opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	dcmd->mbox.w[0] = htole32(seq_num);
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = htole32(curr_aen.word);
	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->evt_detail_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_evt_detail));

	/*
	 * NOTE(review): this re-check looks like a guard against another
	 * AEN having been registered concurrently; in that case the new
	 * command is dropped rather than issued.
	 */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
727 
728 /*
729  * mrsas_start_aen:	Subscribes to AEN during driver load time
730  * @instance:		Adapter soft state
731  */
732 static int
mrsas_start_aen(struct mrsas_softc * sc)733 mrsas_start_aen(struct mrsas_softc *sc)
734 {
735 	struct mrsas_evt_log_info eli;
736 	union mrsas_evt_class_locale class_locale;
737 
738 	/* Get the latest sequence number from FW */
739 
740 	memset(&eli, 0, sizeof(eli));
741 
742 	if (mrsas_get_seq_num(sc, &eli))
743 		return -1;
744 
745 	/* Register AEN with FW for latest sequence number plus 1 */
746 	class_locale.members.reserved = 0;
747 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
748 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
749 
750 	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
751 	    class_locale.word);
752 
753 }
754 
755 /*
756  * mrsas_setup_msix:	Allocate MSI-x vectors
757  * @sc:					adapter soft state
758  */
/*
 * mrsas_setup_msix:	Allocate MSI-x vectors
 * @sc:			adapter soft state
 *
 * For each MSI-x vector, allocates the IRQ resource and wires up
 * mrsas_isr() with a per-vector context.  On any failure, all interrupts
 * set up so far are torn down.  Returns SUCCESS or FAIL.
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		/* MSI-x vector i uses IRQ resource ID i + 1. */
		sc->irq_id[i] = i + 1;
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	/* Release everything allocated in earlier iterations. */
	mrsas_teardown_intr(sc);
	return (FAIL);
}
791 
792 /*
793  * mrsas_allocate_msix:		Setup MSI-x vectors
794  * @sc:						adapter soft state
795  */
796 static int
mrsas_allocate_msix(struct mrsas_softc * sc)797 mrsas_allocate_msix(struct mrsas_softc *sc)
798 {
799 	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
800 		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
801 		    " of vectors\n", sc->msix_vectors);
802 	} else {
803 		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
804 		goto irq_alloc_failed;
805 	}
806 	return SUCCESS;
807 
808 irq_alloc_failed:
809 	mrsas_teardown_intr(sc);
810 	return (FAIL);
811 }
812 
813 /*
814  * mrsas_attach:	PCI entry point
815  * input:			pointer to device struct
816  *
817  * Performs setup of PCI and registers, initializes mutexes and linked lists,
818  * registers interrupts and CAM, and initializes   the adapter/controller to
819  * its proper state.
820  */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, error;

	device_printf(dev, "AVAGO MegaRAID SAS driver version: %s\n",
		MRSAS_VERSION);

	/* Start from a clean softc; all flags/pointers below assume zero. */
	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Classify the controller generation from the PCI device ID. */
	switch (sc->device_id) {
	case MRSAS_INVADER:
	case MRSAS_FURY:
	case MRSAS_INTRUDER:
	case MRSAS_INTRUDER_24:
	case MRSAS_CUTLASS_52:
	case MRSAS_CUTLASS_53:
		sc->mrsas_gen3_ctrl = 1;
		break;
	case MRSAS_VENTURA:
	case MRSAS_CRUSADER:
	case MRSAS_HARPOON:
	case MRSAS_TOMCAT:
	case MRSAS_VENTURA_4PORT:
	case MRSAS_CRUSADER_4PORT:
		sc->is_ventura = true;
		break;
	case MRSAS_AERO_10E1:
	case MRSAS_AERO_10E5:
		device_printf(dev, "Adapter is in configurable secure mode\n");
		/* FALLTHROUGH: secure-mode Aero is still an Aero controller */
	case MRSAS_AERO_10E2:
	case MRSAS_AERO_10E6:
		sc->is_aero = true;
		break;
	case MRSAS_AERO_10E0:
	case MRSAS_AERO_10E3:
	case MRSAS_AERO_10E4:
	case MRSAS_AERO_10E7:
		/* Non-secure Aero parts are not driven; bail out early. */
		device_printf(dev, "Adapter is in non-secure mode\n");
		return SUCCESS;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/* For Ventura/Aero system registers are mapped to BAR0 */
	if (sc->is_ventura || sc->is_aero)
		sc->reg_res_id = PCIR_BAR(0);	/* BAR0 offset */
	else
		sc->reg_res_id = PCIR_BAR(1);	/* BAR1 offset */

	if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Initialize mutexes (ioctl_lock is a spin mutex, the rest sleepable) */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
	mtx_init(&sc->stream_lock, "mrsas_stream_lock", NULL, MTX_DEF);

	/* Initialize command free lists */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);
	mrsas_atomic_set(&sc->prp_count, 0);
	mrsas_atomic_set(&sc->sge_holes, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	/* Off until mrsas_init_fw() decides whether MSI-x is usable. */
	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Error unwinding: each label releases everything set up before it. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}
979 
/*
 * mrsas_ich_startup:	Interrupt config hook callback
 * input:				adapter soft state (as void *)
 *
 * Deferred part of attach, run once interrupts are available: creates the
 * /dev node(s), registers the instance with mrsas_mgmt_info, enables
 * interrupts, queries PD info, starts AEN, and removes the config hook.
 */
static void
mrsas_ich_startup(void *arg)
{
	int i = 0;
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Initialize a counting Semaphore to take care no. of concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	/* Unit 0 additionally exports the Linux-compatible management node. */
	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	/*
	 * NOTE(review): max_index is used as an index without a bound check
	 * against the sc_ptr[] array size — assumes the number of attached
	 * controllers never exceeds the slots; confirm.
	 */
	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Call DCMD get_pd_info for all system PDs */
	for (i = 0; i < MRSAS_MAX_PD; i++) {
		if ((sc->target_list[i].target_id != 0xffff) &&
			sc->pd_info_mem)
			mrsas_get_pd_info(sc, sc->target_list[i].target_id);
	}

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* Startup finished: remove the config hook so boot can proceed. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}
1043 
1044 /*
1045  * mrsas_detach:	De-allocates and teardown resources
1046  * input:			pointer to device struct
1047  *
1048  * This function is the entry point for device disconnect and detach.
1049  * It performs memory de-allocations, shutdown of the controller and various
1050  * teardown and destroy resource functions.
1051  */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Make new IOCTLs/IO paths bail out while we tear down. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
		destroy_dev(sc->mrsas_linux_emulator_cdev);
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Wake the OCR thread so it can observe remove_in_progress and exit. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* Wait (unbounded, 1 Hz polling) for any in-flight OCR to finish. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* Then wait for the OCR kernel thread itself to exit. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Quiesce the controller before releasing resources. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);

	/* Release per-LD stream detection state (Ventura/Aero only). */
	if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
			free(sc->streamDetectByLD[i], M_MRSAS);
		free(sc->streamDetectByLD, M_MRSAS);
		sc->streamDetectByLD = NULL;
	}

	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
	mtx_destroy(&sc->stream_lock);

	/* Wait for all the semaphores to be released */
	while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS)
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1139 
/*
 * mrsas_shutdown:	Shutdown entry point
 * input:			pointer to device struct
 *
 * Flushes the controller cache and issues a controller shutdown DCMD after
 * waiting (bounded, ~15s) for any in-flight OCR to finish.  The OCR wait is
 * skipped entirely when the kernel has panicked.
 */
static int
mrsas_shutdown(device_t dev)
{
	struct mrsas_softc *sc;
	int i;

	sc = device_get_softc(dev);
	/* Make new IOCTLs/IO paths bail out while we shut down. */
	sc->remove_in_progress = 1;
	if (!KERNEL_PANICKED()) {
		if (sc->ocr_thread_active)
			wakeup(&sc->ocr_chan);
		i = 0;
		/* Poll at 1 Hz, up to 15 iterations, for the OCR to complete. */
		while (sc->reset_in_progress && i < 15) {
			i++;
			if ((i % MRSAS_RESET_NOTICE_INTERVAL) == 0) {
				mrsas_dprint(sc, MRSAS_INFO,
				    "[%2d]waiting for OCR to be finished "
				    "from %s\n", i, __func__);
			}
			pause("mr_shutdown", hz);
		}
		if (sc->reset_in_progress) {
			/* Do not shut the controller down under an active reset. */
			mrsas_dprint(sc, MRSAS_INFO,
			    "gave up waiting for OCR to be finished\n");
			return (0);
		}
	}

	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	return (0);
}
1173 
1174 /*
1175  * mrsas_free_mem:		Frees allocated memory
1176  * input:				Adapter instance soft state
1177  *
1178  * This function is called from mrsas_detach() to free previously allocated
1179  * memory.
1180  */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_fw_cmds;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory.  For each DMA allocation below the order is:
	 * unload the DMA map, free the DMA memory, destroy the tag.
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}
	/* Free both copies of the JBOD map. */
	for (i = 0; i < 2; i++) {
		if (sc->jbodmap_phys_addr[i])
			bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_mem[i] != NULL)
			bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
		if (sc->jbodmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->jbodmap_tag[i]);
	}
	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);

	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free PD info memory
	 */
	if (sc->pd_info_phys_addr)
		bus_dmamap_unload(sc->pd_info_tag, sc->pd_info_dmamap);
	if (sc->pd_info_mem != NULL)
		bus_dmamem_free(sc->pd_info_tag, sc->pd_info_mem, sc->pd_info_dmamap);
	if (sc->pd_info_tag != NULL)
		bus_dma_tag_destroy(sc->pd_info_tag);

	/*
	 * Free MFI frames (the per-command DMA frames; the command structs
	 * themselves are released further below)
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 */
	max_fw_cmds = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}
1335 
1336 /*
1337  * mrsas_teardown_intr:	Teardown interrupt
1338  * input:				Adapter instance soft state
1339  *
1340  * This function is called from mrsas_detach() to teardown and release bus
1341  * interrupt resourse.
1342  */
1343 void
mrsas_teardown_intr(struct mrsas_softc * sc)1344 mrsas_teardown_intr(struct mrsas_softc *sc)
1345 {
1346 	int i;
1347 
1348 	if (!sc->msix_enable) {
1349 		if (sc->intr_handle[0])
1350 			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1351 		if (sc->mrsas_irq[0] != NULL)
1352 			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1353 			    sc->irq_id[0], sc->mrsas_irq[0]);
1354 		sc->intr_handle[0] = NULL;
1355 	} else {
1356 		for (i = 0; i < sc->msix_vectors; i++) {
1357 			if (sc->intr_handle[i])
1358 				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1359 				    sc->intr_handle[i]);
1360 
1361 			if (sc->mrsas_irq[i] != NULL)
1362 				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1363 				    sc->irq_id[i], sc->mrsas_irq[i]);
1364 
1365 			sc->intr_handle[i] = NULL;
1366 		}
1367 		pci_release_msi(sc->mrsas_dev);
1368 	}
1369 
1370 }
1371 
1372 /*
1373  * mrsas_suspend:	Suspend entry point
1374  * input:			Device struct pointer
1375  *
1376  * This function is the entry point for system suspend from the OS.
1377  */
static int
mrsas_suspend(device_t dev)
{
	/*
	 * This will be filled when the driver will have hibernation support.
	 * For now no state is saved; report success to the power framework.
	 */
	return (0);
}
1384 
1385 /*
1386  * mrsas_resume:	Resume entry point
1387  * input:			Device struct pointer
1388  *
1389  * This function is the entry point for system resume from the OS.
1390  */
static int
mrsas_resume(device_t dev)
{
	/*
	 * This will be filled when the driver will have hibernation support.
	 * For now no state is restored; report success to the power framework.
	 */
	return (0);
}
1397 
1398 /**
1399  * mrsas_get_softc_instance:    Find softc instance based on cmd type
1400  *
1401  * This function will return softc instance based on cmd type.
1402  * In some case, application fire ioctl on required management instance and
1403  * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1404  * case, else get the softc instance from host_no provided by application in
1405  * user data.
1406  */
1407 
1408 static struct mrsas_softc *
mrsas_get_softc_instance(struct cdev * dev,u_long cmd,caddr_t arg)1409 mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1410 {
1411 	struct mrsas_softc *sc = NULL;
1412 	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1413 
1414 	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1415 		sc = dev->si_drv1;
1416 	} else {
1417 		/*
1418 		 * get the Host number & the softc from data sent by the
1419 		 * Application
1420 		 */
1421 		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1422 		if (sc == NULL)
1423 			printf("There is no Controller number %d\n",
1424 			    user_ioc->host_no);
1425 		else if (user_ioc->host_no >= mrsas_mgmt_info.max_index)
1426 			mrsas_dprint(sc, MRSAS_FAULT,
1427 			    "Invalid Controller number %d\n", user_ioc->host_no);
1428 	}
1429 
1430 	return sc;
1431 }
1432 
1433 /*
1434  * mrsas_ioctl:	IOCtl commands entry point.
1435  *
1436  * This function is the entry point for IOCtls from the OS.  It calls the
1437  * appropriate function for processing depending on the command received.
1438  */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
    struct thread *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	/* Resolve the target softc; MFIIO_PASSTHRU always uses this cdev. */
	switch (cmd) {
	case MFIIO_PASSTHRU:
                sc = (struct mrsas_softc *)(dev->si_drv1);
		break;
	default:
		sc = mrsas_get_softc_instance(dev, cmd, arg);
		break;
        }
	if (!sc)
		return ENOENT;

	/* Refuse new IOCTLs during detach/shutdown or after a fatal HW error. */
	if (sc->remove_in_progress ||
		(sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Either driver remove or shutdown called or "
			"HW is in unrecoverable critical error state.\n");
		return ENOENT;
	}
	/*
	 * Fast path: sample reset_in_progress under the spin mutex; if no
	 * OCR is running, dispatch the IOCTL immediately.
	 */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	/* Otherwise poll at 1 Hz until the OCR completes. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for OCR to be finished from %s\n", i, __func__);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report this controller's PCI bus/slot/function/domain. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	case MFIIO_PASSTHRU:
		ret = mrsas_user_command(sc, (struct mfi_ioc_passthru *)arg);
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1528 
1529 /*
1530  * mrsas_poll:	poll entry point for mrsas driver fd
1531  *
1532  * This function is the entry point for poll from the OS.  It waits for some AEN
1533  * events to be triggered from the controller and notifies back.
1534  */
1535 static int
mrsas_poll(struct cdev * dev,int poll_events,struct thread * td)1536 mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1537 {
1538 	struct mrsas_softc *sc;
1539 	int revents = 0;
1540 
1541 	sc = dev->si_drv1;
1542 
1543 	if (poll_events & (POLLIN | POLLRDNORM)) {
1544 		if (sc->mrsas_aen_triggered) {
1545 			revents |= poll_events & (POLLIN | POLLRDNORM);
1546 		}
1547 	}
1548 	if (revents == 0) {
1549 		if (poll_events & (POLLIN | POLLRDNORM)) {
1550 			mtx_lock(&sc->aen_lock);
1551 			sc->mrsas_poll_waiting = 1;
1552 			selrecord(td, &sc->mrsas_select);
1553 			mtx_unlock(&sc->aen_lock);
1554 		}
1555 	}
1556 	return revents;
1557 }
1558 
1559 /*
1560  * mrsas_setup_irq:	Set up interrupt
1561  * input:			Adapter instance soft state
1562  *
1563  * This function sets up interrupts as a bus resource, with flags indicating
1564  * resource permitting contemporaneous sharing and for resource to activate
1565  * atomically.
1566  */
1567 static int
mrsas_setup_irq(struct mrsas_softc * sc)1568 mrsas_setup_irq(struct mrsas_softc *sc)
1569 {
1570 	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1571 		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1572 
1573 	else {
1574 		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1575 		sc->irq_context[0].sc = sc;
1576 		sc->irq_context[0].MSIxIndex = 0;
1577 		sc->irq_id[0] = 0;
1578 		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1579 		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1580 		if (sc->mrsas_irq[0] == NULL) {
1581 			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1582 			    "interrupt\n");
1583 			return (FAIL);
1584 		}
1585 		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1586 		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1587 		    &sc->irq_context[0], &sc->intr_handle[0])) {
1588 			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1589 			    "interrupt\n");
1590 			return (FAIL);
1591 		}
1592 	}
1593 	return (0);
1594 }
1595 
1596 /*
1597  * mrsas_isr:	ISR entry point
1598  * input:		argument pointer
1599  *
1600  * This function is the interrupt service routine entry point.  There are two
1601  * types of interrupts, state change interrupt and response interrupt.  If an
1602  * interrupt is not ours, we just return.
1603  */
1604 void
mrsas_isr(void * arg)1605 mrsas_isr(void *arg)
1606 {
1607 	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
1608 	struct mrsas_softc *sc = irq_context->sc;
1609 	int status = 0;
1610 
1611 	if (sc->mask_interrupts)
1612 		return;
1613 
1614 	if (!sc->msix_vectors) {
1615 		status = mrsas_clear_intr(sc);
1616 		if (!status)
1617 			return;
1618 	}
1619 	/* If we are resetting, bail */
1620 	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1621 		printf(" Entered into ISR when OCR is going active. \n");
1622 		mrsas_clear_intr(sc);
1623 		return;
1624 	}
1625 	/* Process for reply request and clear response interrupt */
1626 	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
1627 		mrsas_clear_intr(sc);
1628 
1629 	return;
1630 }
1631 
1632 /*
1633  * mrsas_complete_cmd:	Process reply request
1634  * input:				Adapter instance soft state
1635  *
1636  * This function is called from mrsas_isr() to process reply request and clear
1637  * response interrupt. Processing of the reply request entails walking
1638  * through the reply descriptor array for the command request  pended from
1639  * Firmware.  We look at the Function field to determine the command type and
1640  * perform the appropriate action.  Before we return, we clear the response
1641  * interrupt.
1642  */
int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt, *r1_cmd = NULL;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type, *sense;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id, data_length;
	int threshold_reply_count = 0;
#if TM_DEBUG
	MR_TASK_MANAGE_REQUEST *mr_tm_req;
	MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
#endif

	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/* Position at this MSI-x vector's slice of the reply ring, at the
	 * index where we left off last time. */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Find our reply descriptor for the command and process */
	/* All-0xFF words mark an unused (not yet written) descriptor. */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		/* SMID is 1-based; mpt_cmd_list is 0-based. */
		smid = le16toh(reply_desc->SMID);
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.raid_context.status;
		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
		sense = cmd_mpt->sense;
		data_length = scsi_io_req->DataLength;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_TASK_MGMT:
#if TM_DEBUG
			mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request;
			mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)
			    &mr_tm_req->TmRequest;
			device_printf(sc->mrsas_dev, "TM completion type 0x%X, "
			    "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
#endif
            /* Task-management completion: wake the waiting reset path. */
            wakeup_one((void *)&sc->ocr_chan);
            break;
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			/* R1 load balancing for READ */
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			if (cmd_mpt->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
				/* No RAID-1 peer: complete this command alone. */
				mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
				    extStatus, le32toh(data_length), sense);
				mrsas_cmd_done(sc, cmd_mpt);
				mrsas_atomic_dec(&sc->fw_outstanding);
			} else {
				/*
				 * If the peer  Raid  1/10 fast path failed,
				 * mark IO as failed to the scsi layer.
				 * Overwrite the current status by the failed status
				 * and make sure that if any command fails,
				 * driver returns fail status to CAM.
				 * The CCB is completed only after BOTH halves of
				 * the mirrored pair have finished.
				 */
				cmd_mpt->cmd_completed = 1;
				r1_cmd = cmd_mpt->peer_cmd;
				if (r1_cmd->cmd_completed) {
					if (r1_cmd->io_request->RaidContext.raid_context.status != MFI_STAT_OK) {
						status = r1_cmd->io_request->RaidContext.raid_context.status;
						extStatus = r1_cmd->io_request->RaidContext.raid_context.exStatus;
						data_length = r1_cmd->io_request->DataLength;
						sense = r1_cmd->sense;
					}
					mtx_lock(&sc->sim_lock);
					r1_cmd->ccb_ptr = NULL;
					if (r1_cmd->callout_owner) {
						callout_stop(&r1_cmd->cm_callout);
						r1_cmd->callout_owner  = false;
					}
					mtx_unlock(&sc->sim_lock);
					mrsas_release_mpt_cmd(r1_cmd);
					mrsas_atomic_dec(&sc->fw_outstanding);
					mrsas_map_mpt_cmd_status(cmd_mpt, cmd_mpt->ccb_ptr, status,
					    extStatus, le32toh(data_length), sense);
					mrsas_cmd_done(sc, cmd_mpt);
					mrsas_atomic_dec(&sc->fw_outstanding);
				}
			}
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			/*
			 * Make sure NOT TO release the mfi command from the called
			 * function's context if it is fired with issue_polled call.
			 * And also make sure that the issue_polled call should only be
			 * used if INTERRUPT IS DISABLED.
			 */
			if (cmd_mfi->frame->hdr.flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
				mrsas_release_mfi_cmd(cmd_mfi);
			else
				mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			break;
		}

		/* Advance (and wrap) this vector's reply index. */
		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx[MSIxIndex]) {
			/* Wrapped: restart at this vector's ring base. */
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if (sc->msix_combined)
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	if (sc->msix_enable) {
		if (sc->msix_combined) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
1828 
/*
 * mrsas_map_mpt_cmd_status:	Map firmware command status to CAM status
 * input:			MPT command, CCB pointer, FW status and
 *				extended status, transfer length, sense buffer
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the
 * CCB.
 */
1837 void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd * cmd,union ccb * ccb_ptr,u_int8_t status,u_int8_t extStatus,u_int32_t data_length,u_int8_t * sense)1838 mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, union ccb *ccb_ptr, u_int8_t status,
1839     u_int8_t extStatus, u_int32_t data_length, u_int8_t *sense)
1840 {
1841 	struct mrsas_softc *sc = cmd->sc;
1842 	u_int8_t *sense_data;
1843 
1844 	switch (status) {
1845 	case MFI_STAT_OK:
1846 		ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1847 		break;
1848 	case MFI_STAT_SCSI_IO_FAILED:
1849 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1850 		ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1851 		sense_data = (u_int8_t *)&ccb_ptr->csio.sense_data;
1852 		if (sense_data) {
1853 			/* For now just copy 18 bytes back */
1854 			memcpy(sense_data, sense, 18);
1855 			ccb_ptr->csio.sense_len = 18;
1856 			ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1857 		}
1858 		break;
1859 	case MFI_STAT_LD_OFFLINE:
1860 	case MFI_STAT_DEVICE_NOT_FOUND:
1861 		if (ccb_ptr->ccb_h.target_lun)
1862 			ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1863 		else
1864 			ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1865 		break;
1866 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1867 		ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1868 		break;
1869 	default:
1870 		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1871 		ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1872 		ccb_ptr->csio.scsi_status = status;
1873 	}
1874 	return;
1875 }
1876 
1877 /*
1878  * mrsas_alloc_mem:	Allocate DMAable memory
1879  * input:			Adapter instance soft state
1880  *
1881  * This function creates the parent DMA tag and allocates DMAable memory. DMA
1882  * tag describes constraints of DMA mapping. Memory allocated is mapped into
1883  * Kernel virtual address. Callback argument is physical memory address.
1884  */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	/*
	 * Creates the parent DMA tag and then, under it, allocates and maps
	 * every fixed DMA buffer the driver needs: version buffer, IO
	 * request frames, chain frames, reply descriptors, sense buffers,
	 * event-detail buffer, PD-info buffer, and finally a tag for data
	 * (I/O payload) buffers.  Returns 0 on success, ENOMEM on any
	 * allocation failure (caller is responsible for teardown).
	 */
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size,
		evt_detail_size, count, pd_info_size;

	/*
	 * Allocate parent DMA tag
	 */
	if (bus_dma_tag_create(
	    bus_get_dma_tag(sc->mrsas_dev),	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    BUS_SPACE_UNRESTRICTED,	/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	/* All control structures below are kept in the lower 4GB
	 * (BUS_SPACE_MAXADDR_32BIT lowaddr) — presumably a firmware
	 * addressing requirement; TODO confirm against HW spec. */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames (16-byte aligned)
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames (4-byte aligned)
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	/* One reply ring per MSI-X vector (at least one). */
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	/* NOTE(review): reply descriptors are not zeroed here, unlike the
	 * other buffers — presumably initialized elsewhere; verify. */
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array.  Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for PD INFO structure
	 */
	pd_info_size = sizeof(struct mrsas_pd_info);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    pd_info_size,
	    1,
	    pd_info_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->pd_info_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create PD INFO tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->pd_info_tag, (void **)&sc->pd_info_mem,
	    BUS_DMA_NOWAIT, &sc->pd_info_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc PD INFO buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->pd_info_mem, pd_info_size);
	if (bus_dmamap_load(sc->pd_info_tag, sc->pd_info_dmamap,
	    sc->pd_info_mem, pd_info_size, mrsas_addr_cb,
	    &sc->pd_info_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load PD INFO buffer memory\n");
		return (ENOMEM);
	}

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  Unlike the tags above, data maps use
	 * busdma_lock_mutex/io_lock because they are loaded at I/O time.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    maxphys,
	    sc->max_num_sge,		/* nsegments */
	    maxphys,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}
2145 
2146 /*
2147  * mrsas_addr_cb:	Callback function of bus_dmamap_load()
2148  * input:			callback argument, machine dependent type
2149  * 					that describes DMA segments, number of segments, error code
2150  *
2151  * This function is for the driver to receive mapping information resultant of
2152  * the bus_dmamap_load(). The information is actually not being used, but the
2153  * address is saved anyway.
2154  */
2155 void
mrsas_addr_cb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)2156 mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2157 {
2158 	bus_addr_t *addr;
2159 
2160 	addr = arg;
2161 	*addr = segs[0].ds_addr;
2162 }
2163 
2164 /*
2165  * mrsas_setup_raidmap:	Set up RAID map.
2166  * input:				Adapter instance soft state
2167  *
2168  * Allocate DMA memory for the RAID maps and perform setup.
2169  */
2170 static int
mrsas_setup_raidmap(struct mrsas_softc * sc)2171 mrsas_setup_raidmap(struct mrsas_softc *sc)
2172 {
2173 	int i;
2174 
2175 	for (i = 0; i < 2; i++) {
2176 		sc->ld_drv_map[i] =
2177 		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
2178 		/* Do Error handling */
2179 		if (!sc->ld_drv_map[i]) {
2180 			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
2181 
2182 			if (i == 1)
2183 				free(sc->ld_drv_map[0], M_MRSAS);
2184 			/* ABORT driver initialization */
2185 			goto ABORT;
2186 		}
2187 	}
2188 
2189 	for (int i = 0; i < 2; i++) {
2190 		if (bus_dma_tag_create(sc->mrsas_parent_tag,
2191 		    4, 0,
2192 		    BUS_SPACE_MAXADDR_32BIT,
2193 		    BUS_SPACE_MAXADDR,
2194 		    NULL, NULL,
2195 		    sc->max_map_sz,
2196 		    1,
2197 		    sc->max_map_sz,
2198 		    BUS_DMA_ALLOCNOW,
2199 		    NULL, NULL,
2200 		    &sc->raidmap_tag[i])) {
2201 			device_printf(sc->mrsas_dev,
2202 			    "Cannot allocate raid map tag.\n");
2203 			return (ENOMEM);
2204 		}
2205 		if (bus_dmamem_alloc(sc->raidmap_tag[i],
2206 		    (void **)&sc->raidmap_mem[i],
2207 		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
2208 			device_printf(sc->mrsas_dev,
2209 			    "Cannot allocate raidmap memory.\n");
2210 			return (ENOMEM);
2211 		}
2212 		bzero(sc->raidmap_mem[i], sc->max_map_sz);
2213 
2214 		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
2215 		    sc->raidmap_mem[i], sc->max_map_sz,
2216 		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
2217 		    BUS_DMA_NOWAIT)) {
2218 			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
2219 			return (ENOMEM);
2220 		}
2221 		if (!sc->raidmap_mem[i]) {
2222 			device_printf(sc->mrsas_dev,
2223 			    "Cannot allocate memory for raid map.\n");
2224 			return (ENOMEM);
2225 		}
2226 	}
2227 
2228 	if (!mrsas_get_map_info(sc))
2229 		mrsas_sync_map_info(sc);
2230 
2231 	return (0);
2232 
2233 ABORT:
2234 	return (1);
2235 }
2236 
/**
 * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
 * @sc:				Adapter soft state
 *
 * Allocates the JBOD map DMA buffers (once), syncs the PD sequence
 * numbers with firmware, and sets sc->use_seqnum_jbod_fp accordingly.
 * No return value.
 */
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
	int i;
	uint32_t pd_seq_map_sz;

	/* One MR_PD_CFG_SEQ is embedded in MR_PD_CFG_SEQ_NUM_SYNC, hence
	 * the "- 1" in the entry count. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));

	/* Firmware does not support JBOD sequence-number fast path. */
	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
		sc->use_seqnum_jbod_fp = 0;
		return;
	}
	/* Buffers are allocated only once; reuse them on re-entry (OCR). */
	if (sc->jbodmap_mem[0])
		goto skip_alloc;

	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    pd_seq_map_sz,
		    1,
		    pd_seq_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->jbodmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map tag.\n");
			/* NOTE(review): this early return leaves
			 * use_seqnum_jbod_fp at its prior value — confirm
			 * callers zero it beforehand. */
			return;
		}
		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
		    (void **)&sc->jbodmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map memory.\n");
			return;
		}
		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);

		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
		    sc->jbodmap_mem[i], pd_seq_map_sz,
		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
			return;
		}
		/* Defensive check; jbodmap_mem[i] was set by the alloc
		 * above, so this should never fire. */
		if (!sc->jbodmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for jbod map.\n");
			sc->use_seqnum_jbod_fp = 0;
			return;
		}
	}

skip_alloc:
	/* Enable the fast path only if both syncs (fetch + update)
	 * succeed. */
	if (!megasas_sync_pd_seq_num(sc, false) &&
	    !megasas_sync_pd_seq_num(sc, true))
		sc->use_seqnum_jbod_fp = 1;
	else
		sc->use_seqnum_jbod_fp = 0;

	/* NOTE(review): printed even when the sync above failed. */
	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}
2308 
2309 /*
2310  * mrsas_init_fw:	Initialize Firmware
2311  * input:			Adapter soft state
2312  *
2313  * Calls transition_to_ready() to make sure Firmware is in operational state and
2314  * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
2315  * issues internal commands to get the controller info after the IOC_INIT
2316  * command response is received by Firmware.  Note:  code relating to
2317  * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2318  * is left here as placeholder.
2319  */
static int
mrsas_init_fw(struct mrsas_softc *sc)
{
	/*
	 * Bring the controller to operational state: transition FW to
	 * ready, probe MSI-X capability, run IOC INIT, then issue the
	 * internal DCMDs (ctrl info, PD list, LD list) and set up RAID /
	 * JBOD maps and I/O size limits.  Returns SUCCESS (0) or non-zero
	 * on failure.
	 */
	int ret, loop, ocr = 0;
	u_int32_t max_sectors_1;
	u_int32_t max_sectors_2;
	u_int32_t tmp_sectors;
	u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
	int msix_enable = 0;
	int fw_msix_count = 0;
	int i, j;

	/* Make sure Firmware is ready */
	ret = mrsas_transition_to_ready(sc, ocr);
	if (ret != SUCCESS) {
		return (ret);
	}
	if (sc->is_ventura || sc->is_aero) {
		/* Ventura/Aero report the max RAID map size in
		 * scratch pad 3. */
		scratch_pad_3 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_3));
#if VD_EXT_DEBUG
		device_printf(sc->mrsas_dev, "scratch_pad_3 0x%x\n", scratch_pad_3);
#endif
		sc->maxRaidMapSize = ((scratch_pad_3 >>
		    MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
		    MR_MAX_RAID_MAP_SIZE_MASK);
	}
	/* MSI-x index 0- reply post host index register */
	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
	/* Check if MSI-X is supported while in ready state
	 * (bit 26 of the outbound scratch pad). */
	msix_enable = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;

	if (msix_enable) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));

		/* Check max MSI-X vectors */
		if (sc->device_id == MRSAS_TBOLT) {
			sc->msix_vectors = (scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
			fw_msix_count = sc->msix_vectors;
		} else {
			/* Invader/Fury supports 96 MSI-X vectors */
			sc->msix_vectors = ((scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
			fw_msix_count = sc->msix_vectors;

			/* Combined reply queues need the supplemental
			 * host-index registers. */
			if ((sc->mrsas_gen3_ctrl && (sc->msix_vectors > 8)) ||
				((sc->is_ventura || sc->is_aero) && (sc->msix_vectors > 16)))
				sc->msix_combined = true;
			/*
			 * Save 1-15 reply post index
			 * address to local memory Index 0
			 * is already saved from reg offset
			 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
			 */
			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
			    loop++) {
				sc->msix_reg_offset[loop] =
				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
				    (loop * 0x10);
			}
		}

		/* Don't bother allocating more MSI-X vectors than cpus */
		sc->msix_vectors = min(sc->msix_vectors,
		    mp_ncpus);

		/* Allocate MSI-x vectors */
		if (mrsas_allocate_msix(sc) == SUCCESS)
			sc->msix_enable = 1;
		else
			sc->msix_enable = 0;

		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
		    "Online CPU %d Current MSIX <%d>\n",
		    fw_msix_count, mp_ncpus, sc->msix_vectors);
	}
	/*
	 * MSI-X host index 0 is common for all adapters.
	 * It is used for all MPT based Adapters.
	 */
	if (sc->msix_combined) {
		sc->msix_reg_offset[0] =
		    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET;
	}
	if (mrsas_init_adapter(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
		return (1);
	}

	if (sc->is_ventura || sc->is_aero) {
		/* NVMe page size is advertised in scratch pad 4. */
		scratch_pad_4 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_4));
		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
			sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);

		device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
	}

	/* Allocate internal commands for pass-thru */
	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
		return (1);
	}
	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
	if (!sc->ctrl_info) {
		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
		return (1);
	}
	/*
	 * Get the controller info from FW, so that the MAX VD support
	 * availability can be decided.
	 */
	if (mrsas_get_ctrl_info(sc)) {
		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
		return (1);
	}
	sc->secure_jbod_support =
	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;

	if (sc->secure_jbod_support)
		device_printf(sc->mrsas_dev, "FW supports SED \n");

	if (sc->use_seqnum_jbod_fp)
		device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");

	if (sc->support_morethan256jbod)
		device_printf(sc->mrsas_dev, "FW supports JBOD Map Ext \n");

	/* RAID map failure is reported but not fatal. */
	if (mrsas_setup_raidmap(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
		    "There seems to be some problem in the controller\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	megasas_setup_jbod_map(sc);

	/* Initialize the task-management target list to "no target". */
	memset(sc->target_list, 0,
		MRSAS_MAX_TM_TARGETS * sizeof(struct mrsas_target));
	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++)
		sc->target_list[i].target_id = 0xffff;

	/* For pass-thru, get PD/LD list and controller info */
	memset(sc->pd_list, 0,
	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
	if (mrsas_get_pd_list(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Get PD list failed.\n");
		return (1);
	}
	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
	if (mrsas_get_ld_list(sc) != SUCCESS) {
		/* NOTE(review): "lsit" is a typo for "list" in this
		 * message. */
		device_printf(sc->mrsas_dev, "Get LD lsit failed.\n");
		return (1);
	}

	/* Per-LD stream detection state (Ventura/Aero only). */
	if ((sc->is_ventura || sc->is_aero) && sc->drv_stream_detection) {
		sc->streamDetectByLD = malloc(sizeof(PTR_LD_STREAM_DETECT) *
						MAX_LOGICAL_DRIVES_EXT, M_MRSAS, M_NOWAIT);
		if (!sc->streamDetectByLD) {
			device_printf(sc->mrsas_dev,
				"unable to allocate stream detection for pool of LDs\n");
			return (1);
		}
		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
			sc->streamDetectByLD[i] = malloc(sizeof(LD_STREAM_DETECT), M_MRSAS, M_NOWAIT);
			if (!sc->streamDetectByLD[i]) {
				device_printf(sc->mrsas_dev, "unable to allocate stream detect by LD\n");
				/* Unwind everything allocated so far. */
				for (j = 0; j < i; ++j)
					free(sc->streamDetectByLD[j], M_MRSAS);
				free(sc->streamDetectByLD, M_MRSAS);
				sc->streamDetectByLD = NULL;
				return (1);
			}
			memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
			sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
		}
	}

	/*
	 * Compute the max allowed sectors per IO: The controller info has
	 * two limits on max sectors. Driver should use the minimum of these
	 * two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares ( < FW ver 30) didn't report information to
	 * calculate max_sectors_1. So the number ended up as zero always.
	 */
	tmp_sectors = 0;
	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
	    sc->ctrl_info->max_strips_per_io;
	max_sectors_2 = sc->ctrl_info->max_request_size;
	tmp_sectors = min(max_sectors_1, max_sectors_2);
	sc->max_sectors_per_req = (sc->max_num_sge - 1) * MRSAS_PAGE_SIZE / 512;

	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
		sc->max_sectors_per_req = tmp_sectors;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
	sc->UnevenSpanSupport =
	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
	if (sc->UnevenSpanSupport) {
		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
		    sc->UnevenSpanSupport);

		/* Fast path I/O requires a valid RAID map. */
		if (MR_ValidateMapInfo(sc))
			sc->fast_path_io = 1;
		else
			sc->fast_path_io = 0;
	}

	device_printf(sc->mrsas_dev, "max_fw_cmds: %u  max_scsi_cmds: %u\n",
		sc->max_fw_cmds, sc->max_scsi_cmds);
	return (0);
}
2537 
2538 /*
2539  * mrsas_init_adapter:	Initializes the adapter/controller
2540  * input:				Adapter soft state
2541  *
2542  * Prepares for the issuing of the IOC Init cmd to FW for initializing the
2543  * ROC/controller.  The FW register is read to determined the number of
2544  * commands that is supported.  All memory allocations for IO is based on
2545  * max_cmd.  Appropriate calculations are performed in this function.
2546  */
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
	/*
	 * Size all command/reply/chain structures from the FW status
	 * register, allocate driver memory and MPT commands, then issue
	 * IOC INIT.  Returns SUCCESS (0) or a non-zero error from the
	 * allocation/init helpers.
	 */
	uint32_t status;
	u_int32_t scratch_pad_2;
	int ret;
	int i = 0;

	/* Read FW status register */
	status = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds - 1;
	/* Reserve MRSAS_MAX_MFI_CMDS slots for internal (MFI) commands. */
	sc->max_scsi_cmds = sc->max_fw_cmds - MRSAS_MAX_MFI_CMDS;

	/* Determine allocation size of command frames.  Reply queue depth
	 * is the command count rounded up to 16, doubled. */
	sc->reply_q_depth = ((sc->max_fw_cmds + 1 + 15) / 16 * 16) * 2;
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
	    (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
	scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
	    outbound_scratch_pad_2));

	mrsas_dprint(sc, MRSAS_TRACE, "%s: sc->reply_q_depth 0x%x,"
	    "sc->request_alloc_sz 0x%x, sc->reply_alloc_sz 0x%x,"
	    "sc->io_frames_alloc_sz 0x%x\n", __func__,
	    sc->reply_q_depth, sc->request_alloc_sz,
	    sc->reply_alloc_sz, sc->io_frames_alloc_sz);

	/*
	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
	 * Firmware support extended IO chain frame which is 4 time more
	 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
	 * 1K 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
	 */
	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_1MB_IO;
	else
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_256K_IO;

	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * sc->max_fw_cmds;
	/* SGEs are 16 bytes each in the main message frame. */
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
	/* -2: one SGE in the main frame becomes the chain element, and one
	 * is kept in reserve — TODO confirm the exact accounting. */
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	mrsas_dprint(sc, MRSAS_INFO,
	    "max sge: 0x%x, max chain frame size: 0x%x, "
	    "max fw cmd: 0x%x sc->chain_frames_alloc_sz: 0x%x\n",
	    sc->max_num_sge,
	    sc->max_chain_frame_sz, sc->max_fw_cmds,
	    sc->chain_frames_alloc_sz);

	/* Used for pass thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION)) / 16;

	/* One reply index per MSI-X vector (at least one). */
	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;

	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return (ret);

	return (0);
}
2633 
2634 /*
2635  * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2636  * input:				Adapter soft state
2637  *
2638  * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2639  */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	/*
	 * Allocate, zero, and bus-map the IOC INIT command buffer.
	 * Returns 0 on success, ENOMEM on failure; partial allocations
	 * are cleaned up by mrsas_free_ioc_cmd().
	 */
	int ioc_init_size;

	/* Allocate IOC INIT command; the extra 1024 bytes hold the MFI
	 * init frame, with the MPI2 request placed after it (see
	 * mrsas_ioc_init()). */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}
2675 
/*
 * mrsas_free_ioc_cmd:	Frees memory of the IOC Init command
 * input:				Adapter soft state
 *
 * Deallocates memory of the IOC Init cmd.
 */
2682 void
mrsas_free_ioc_cmd(struct mrsas_softc * sc)2683 mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2684 {
2685 	if (sc->ioc_init_phys_mem)
2686 		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2687 	if (sc->ioc_init_mem != NULL)
2688 		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2689 	if (sc->ioc_init_tag != NULL)
2690 		bus_dma_tag_destroy(sc->ioc_init_tag);
2691 }
2692 
2693 /*
2694  * mrsas_ioc_init:	Sends IOC Init command to FW
2695  * input:			Adapter soft state
2696  *
2697  * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2698  */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	/*
	 * Probe scratch pad 2 for SYNCHRONIZE_CACHE support unless the
	 * tunable explicitly blocked it.
	 */
	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	/*
	 * The MPI2 IOC INIT request lives 1024 bytes into the DMA buffer,
	 * after the MFI init frame (see mrsas_alloc_ioc_cmd()).  Multi-byte
	 * fields are converted to little-endian for the firmware.
	 */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = htole16(MPI2_VERSION);
	IOCInitMsg->HeaderVersion = htole16(MPI2_HEADER_VERSION);
	IOCInitMsg->SystemRequestFrameSize = htole16(MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
	IOCInitMsg->ReplyDescriptorPostQueueDepth = htole16(sc->reply_q_depth);
	IOCInitMsg->ReplyDescriptorPostQueueAddress = htole64(sc->reply_desc_phys_addr);
	IOCInitMsg->SystemRequestFrameBaseAddress = htole64(sc->io_request_phys_addr);
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
	IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;

	/* The MFI frame at offset 0 wraps the MPI2 request for the FW. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	/* 0xFF marks "no status yet"; the FW overwrites it on completion. */
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* driver support Extended MSIX */
	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		/*
		 * NOTE(review): driver_ver_lo is not byte-swapped like the
		 * other fields -- confirm whether htole32 is needed here on
		 * big-endian hosts.
		 */
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	/* Advertise driver capabilities to the firmware. */
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;

	init_frame->driver_operations.reg = htole32(init_frame->driver_operations.reg);

	/* Point the MFI frame at the embedded MPI2 IOC INIT request. */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = htole32(phys_addr);
	init_frame->data_xfer_len = htole32(sizeof(Mpi2IOCInitRequest_t));

	req_desc.addr.Words = htole64((bus_addr_t)sc->ioc_init_phys_mem);
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_write_64bit_req_desc(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	/* cmd_status 0 == success; 0xFF == timeout; anything else == FW error. */
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* Aero controllers may accept 64-bit atomic request descriptors. */
	if (sc->is_aero) {
		scratch_pad_2 = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->atomic_desc_support = (scratch_pad_2 &
			MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
		device_printf(sc->mrsas_dev, "FW supports atomic descriptor: %s\n",
			sc->atomic_desc_support ? "Yes" : "No");
	}

	/* The IOC INIT buffer is single-use; release it regardless of outcome. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2807 
2808 /*
2809  * mrsas_alloc_mpt_cmds:	Allocates the command packets
2810  * input:					Adapter instance soft state
2811  *
2812  * This function allocates the internal commands for IOs. Each command that is
2813  * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2814  * array is allocated with mrsas_mpt_cmd context.  The free commands are
2815  * maintained in a linked list (cmd pool). SMID value range is from 1 to
2816  * max_fw_cmds.
2817  */
2818 int
mrsas_alloc_mpt_cmds(struct mrsas_softc * sc)2819 mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2820 {
2821 	int i, j;
2822 	u_int32_t max_fw_cmds, count;
2823 	struct mrsas_mpt_cmd *cmd;
2824 	pMpi2ReplyDescriptorsUnion_t reply_desc;
2825 	u_int32_t offset, chain_offset, sense_offset;
2826 	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2827 	u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2828 
2829 	max_fw_cmds = sc->max_fw_cmds;
2830 
2831 	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2832 	if (!sc->req_desc) {
2833 		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2834 		return (ENOMEM);
2835 	}
2836 	memset(sc->req_desc, 0, sc->request_alloc_sz);
2837 
2838 	/*
2839 	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2840 	 * Allocate the dynamic array first and then allocate individual
2841 	 * commands.
2842 	 */
2843 	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds,
2844 	    M_MRSAS, M_NOWAIT);
2845 	if (!sc->mpt_cmd_list) {
2846 		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2847 		return (ENOMEM);
2848 	}
2849 	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_fw_cmds);
2850 	for (i = 0; i < max_fw_cmds; i++) {
2851 		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2852 		    M_MRSAS, M_NOWAIT);
2853 		if (!sc->mpt_cmd_list[i]) {
2854 			for (j = 0; j < i; j++)
2855 				free(sc->mpt_cmd_list[j], M_MRSAS);
2856 			free(sc->mpt_cmd_list, M_MRSAS);
2857 			sc->mpt_cmd_list = NULL;
2858 			return (ENOMEM);
2859 		}
2860 	}
2861 
2862 	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2863 	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2864 	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2865 	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2866 	sense_base = (u_int8_t *)sc->sense_mem;
2867 	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2868 	for (i = 0; i < max_fw_cmds; i++) {
2869 		cmd = sc->mpt_cmd_list[i];
2870 		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2871 		chain_offset = sc->max_chain_frame_sz * i;
2872 		sense_offset = MRSAS_SENSE_LEN * i;
2873 		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2874 		cmd->index = i + 1;
2875 		cmd->ccb_ptr = NULL;
2876 		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2877 		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
2878 		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2879 		cmd->sc = sc;
2880 		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2881 		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2882 		cmd->io_request_phys_addr = io_req_base_phys + offset;
2883 		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2884 		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2885 		cmd->sense = sense_base + sense_offset;
2886 		cmd->sense_phys_addr = sense_base_phys + sense_offset;
2887 		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2888 			return (FAIL);
2889 		}
2890 		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2891 	}
2892 
2893 	/* Initialize reply descriptor array to 0xFFFFFFFF */
2894 	reply_desc = sc->reply_desc_mem;
2895 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2896 	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2897 		reply_desc->Words = MRSAS_ULONG_MAX;
2898 	}
2899 	return (0);
2900 }
2901 
2902 /*
2903  * mrsas_write_64bit_req_dsc:	Writes 64 bit request descriptor to FW
2904  * input:			Adapter softstate
2905  * 				request descriptor address low
2906  * 				request descriptor address high
2907  */
2908 void
mrsas_write_64bit_req_desc(struct mrsas_softc * sc,u_int32_t req_desc_lo,u_int32_t req_desc_hi)2909 mrsas_write_64bit_req_desc(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2910     u_int32_t req_desc_hi)
2911 {
2912 	mtx_lock(&sc->pci_lock);
2913 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2914 	    le32toh(req_desc_lo));
2915 	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2916 	    le32toh(req_desc_hi));
2917 	mtx_unlock(&sc->pci_lock);
2918 }
2919 
2920 /*
2921  * mrsas_fire_cmd:	Sends command to FW
2922  * input:		Adapter softstate
2923  * 			request descriptor address low
2924  * 			request descriptor address high
2925  *
2926  * This functions fires the command to Firmware by writing to the
2927  * inbound_low_queue_port and inbound_high_queue_port.
2928  */
2929 void
mrsas_fire_cmd(struct mrsas_softc * sc,u_int32_t req_desc_lo,u_int32_t req_desc_hi)2930 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2931     u_int32_t req_desc_hi)
2932 {
2933 	if (sc->atomic_desc_support)
2934 		mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_single_queue_port),
2935 		    le32toh(req_desc_lo));
2936 	else
2937 		mrsas_write_64bit_req_desc(sc, req_desc_lo, req_desc_hi);
2938 }
2939 
2940 /*
2941  * mrsas_transition_to_ready:  Move FW to Ready state input:
2942  * Adapter instance soft state
2943  *
 * During initialization, the FW can potentially be in any one of several
 * possible states. If the FW is in the operational or waiting-for-handshake
 * states, the driver must take steps to bring it to the ready state.
 * Otherwise, it has to wait for the ready state.
2948  */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state __unused;
	u_int32_t abs_state, curr_abs_state;

	/* Low bits of outbound scratch pad encode the FW state. */
	val = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Full register value, used below to detect any change. */
		abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			/* FAULT is fatal unless this is an OCR attempt. */
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			/* Acknowledge the boot message via the doorbell. */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Poll (1 ms steps) until the doorbell busy bit clears. */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			/* Any change in the full register value counts as progress. */
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
3054 
3055 /*
3056  * mrsas_get_mfi_cmd:	Get a cmd from free command pool
3057  * input:				Adapter soft state
3058  *
3059  * This function removes an MFI command from the command list.
3060  */
3061 struct mrsas_mfi_cmd *
mrsas_get_mfi_cmd(struct mrsas_softc * sc)3062 mrsas_get_mfi_cmd(struct mrsas_softc *sc)
3063 {
3064 	struct mrsas_mfi_cmd *cmd = NULL;
3065 
3066 	mtx_lock(&sc->mfi_cmd_pool_lock);
3067 	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
3068 		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
3069 		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
3070 	}
3071 	mtx_unlock(&sc->mfi_cmd_pool_lock);
3072 
3073 	return cmd;
3074 }
3075 
3076 /*
3077  * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
3078  * input:				Adapter Context.
3079  *
3080  * This function will check FW status register and flag do_timeout_reset flag.
3081  * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
3082  * trigger reset.
3083  */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;
	u_int8_t tm_target_reset_failed = 0;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/* Sleep for 1 second and check the queue status */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		/* Exit the thread on detach or after a fatal HW error. */
		if (sc->remove_in_progress ||
		    sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to %s from %s\n",
			    sc->remove_in_progress ? "Shutdown" :
			    "Hardware critical error", __func__);
			break;
		}
		fw_status = mrsas_read_reg_with_retries(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		/* Act on FW fault, a flagged DCMD timeout, or pending target resets. */
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset ||
			mrsas_atomic_read(&sc->target_reset_outstanding)) {
			/* First, freeze further IOs to come to the SIM */
			mrsas_xpt_freeze(sc);

			/* If this is an IO timeout then go for target reset */
			if (mrsas_atomic_read(&sc->target_reset_outstanding)) {
				device_printf(sc->mrsas_dev, "Initiating Target RESET "
				    "because of SCSI IO timeout!\n");

				/* Let the remaining IOs to complete */
				msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
				      "mrsas_reset_targets", 5 * hz);

				/* Try to reset the target device */
				if (mrsas_reset_targets(sc) == FAIL)
					tm_target_reset_failed = 1;
			}

			/* If this is a DCMD timeout or FW fault,
			 * then go for controller reset
			 */
			if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed ||
			    (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) {
				/* NOTE(review): "Initiaiting" typo in the log text left as-is. */
				if (tm_target_reset_failed)
					device_printf(sc->mrsas_dev, "Initiaiting OCR because of "
					    "TM FAILURE!\n");
				else
					device_printf(sc->mrsas_dev, "Initiaiting OCR "
						"because of %s!\n", sc->do_timedout_reset ?
						"DCMD IO Timeout" : "FW fault");

				/* Spin lock guards reset_in_progress against the ioctl path. */
				mtx_lock_spin(&sc->ioctl_lock);
				sc->reset_in_progress = 1;
				mtx_unlock_spin(&sc->ioctl_lock);
				sc->reset_count++;

				/*
				 * Wait for the AEN task to be completed if it is running.
				 */
				mtx_unlock(&sc->sim_lock);
				taskqueue_drain(sc->ev_tq, &sc->ev_task);
				mtx_lock(&sc->sim_lock);

				/* Keep new AEN work queued-but-idle during the reset. */
				taskqueue_block(sc->ev_tq);
				/* Try to reset the controller */
				mrsas_reset_ctrl(sc, sc->do_timedout_reset);

				sc->do_timedout_reset = 0;
				sc->reset_in_progress = 0;
				tm_target_reset_failed = 0;
				mrsas_atomic_set(&sc->target_reset_outstanding, 0);
				memset(sc->target_reset_pool, 0,
				    sizeof(sc->target_reset_pool));
				taskqueue_unblock(sc->ev_tq);
			}

			/* Now allow IOs to come to the SIM */
			 mrsas_xpt_release(sc);
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
3176 
3177 /*
3178  * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
3179  * input:					Adapter Context.
3180  *
 * This function clears the reply descriptors so that, post OCR, the driver
 * and FW will not see stale completion history.
3183  */
3184 void
mrsas_reset_reply_desc(struct mrsas_softc * sc)3185 mrsas_reset_reply_desc(struct mrsas_softc *sc)
3186 {
3187 	int i, count;
3188 	pMpi2ReplyDescriptorsUnion_t reply_desc;
3189 
3190 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3191 	for (i = 0; i < count; i++)
3192 		sc->last_reply_idx[i] = 0;
3193 
3194 	reply_desc = sc->reply_desc_mem;
3195 	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
3196 		reply_desc->Words = MRSAS_ULONG_MAX;
3197 	}
3198 }
3199 
3200 /*
3201  * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
3202  * input:				Adapter Context.
3203  *
3204  * This function will run from thread context so that it can sleep. 1. Do not
3205  * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
3206  * to complete for 180 seconds. 3. If #2 does not find any outstanding
3207  * command Controller is in working state, so skip OCR. Otherwise, do
3208  * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
3209  * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
3210  * OCR, Re-fire Management command and move Controller to Operation state.
3211  */
3212 int
mrsas_reset_ctrl(struct mrsas_softc * sc,u_int8_t reset_reason)3213 mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
3214 {
3215 	int retval = SUCCESS, i, j, retry = 0;
3216 	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
3217 	union ccb *ccb;
3218 	struct mrsas_mfi_cmd *mfi_cmd;
3219 	struct mrsas_mpt_cmd *mpt_cmd;
3220 	union mrsas_evt_class_locale class_locale;
3221 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3222 
3223 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
3224 		device_printf(sc->mrsas_dev,
3225 		    "mrsas: Hardware critical error, returning FAIL.\n");
3226 		return FAIL;
3227 	}
3228 	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3229 	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
3230 	mrsas_disable_intr(sc);
3231 	msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
3232 	    sc->mrsas_fw_fault_check_delay * hz);
3233 
3234 	/* First try waiting for commands to complete */
3235 	if (mrsas_wait_for_outstanding(sc, reset_reason)) {
3236 		mrsas_dprint(sc, MRSAS_OCR,
3237 		    "resetting adapter from %s.\n",
3238 		    __func__);
3239 		/* Now return commands back to the CAM layer */
3240 		mtx_unlock(&sc->sim_lock);
3241 		for (i = 0; i < sc->max_fw_cmds; i++) {
3242 			mpt_cmd = sc->mpt_cmd_list[i];
3243 
3244 			if (mpt_cmd->peer_cmd) {
3245 				mrsas_dprint(sc, MRSAS_OCR,
3246 				    "R1 FP command [%d] - (mpt_cmd) %p, (peer_cmd) %p\n",
3247 				    i, mpt_cmd, mpt_cmd->peer_cmd);
3248 			}
3249 
3250 			if (mpt_cmd->ccb_ptr) {
3251 				if (mpt_cmd->callout_owner) {
3252 					ccb = (union ccb *)(mpt_cmd->ccb_ptr);
3253 					ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3254 					mrsas_cmd_done(sc, mpt_cmd);
3255 				} else {
3256 					mpt_cmd->ccb_ptr = NULL;
3257 					mrsas_release_mpt_cmd(mpt_cmd);
3258 				}
3259 			}
3260 		}
3261 
3262 		mrsas_atomic_set(&sc->fw_outstanding, 0);
3263 
3264 		mtx_lock(&sc->sim_lock);
3265 
3266 		status_reg = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3267 		    outbound_scratch_pad));
3268 		abs_state = status_reg & MFI_STATE_MASK;
3269 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
3270 		if (sc->disableOnlineCtrlReset ||
3271 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3272 			/* Reset not supported, kill adapter */
3273 			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
3274 			mrsas_kill_hba(sc);
3275 			retval = FAIL;
3276 			goto out;
3277 		}
3278 		/* Now try to reset the chip */
3279 		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
3280 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3281 			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
3282 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3283 			    MPI2_WRSEQ_1ST_KEY_VALUE);
3284 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3285 			    MPI2_WRSEQ_2ND_KEY_VALUE);
3286 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3287 			    MPI2_WRSEQ_3RD_KEY_VALUE);
3288 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3289 			    MPI2_WRSEQ_4TH_KEY_VALUE);
3290 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3291 			    MPI2_WRSEQ_5TH_KEY_VALUE);
3292 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
3293 			    MPI2_WRSEQ_6TH_KEY_VALUE);
3294 
3295 			/* Check that the diag write enable (DRWE) bit is on */
3296 			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3297 			    fusion_host_diag));
3298 			retry = 0;
3299 			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3300 				DELAY(100 * 1000);
3301 				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3302 				    fusion_host_diag));
3303 				if (retry++ == 100) {
3304 					mrsas_dprint(sc, MRSAS_OCR,
3305 					    "Host diag unlock failed!\n");
3306 					break;
3307 				}
3308 			}
3309 			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3310 				continue;
3311 
3312 			/* Send chip reset command */
3313 			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
3314 			    host_diag | HOST_DIAG_RESET_ADAPTER);
3315 			DELAY(3000 * 1000);
3316 
3317 			/* Make sure reset adapter bit is cleared */
3318 			host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3319 			    fusion_host_diag));
3320 			retry = 0;
3321 			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3322 				DELAY(100 * 1000);
3323 				host_diag = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3324 				    fusion_host_diag));
3325 				if (retry++ == 1000) {
3326 					mrsas_dprint(sc, MRSAS_OCR,
3327 					    "Diag reset adapter never cleared!\n");
3328 					break;
3329 				}
3330 			}
3331 			if (host_diag & HOST_DIAG_RESET_ADAPTER)
3332 				continue;
3333 
3334 			abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3335 			    outbound_scratch_pad)) & MFI_STATE_MASK;
3336 			retry = 0;
3337 
3338 			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3339 				DELAY(100 * 1000);
3340 				abs_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
3341 				    outbound_scratch_pad)) & MFI_STATE_MASK;
3342 			}
3343 			if (abs_state <= MFI_STATE_FW_INIT) {
3344 				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
3345 				    " state = 0x%x\n", abs_state);
3346 				continue;
3347 			}
3348 			/* Wait for FW to become ready */
3349 			if (mrsas_transition_to_ready(sc, 1)) {
3350 				mrsas_dprint(sc, MRSAS_OCR,
3351 				    "mrsas: Failed to transition controller to ready.\n");
3352 				continue;
3353 			}
3354 			mrsas_reset_reply_desc(sc);
3355 			if (mrsas_ioc_init(sc)) {
3356 				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
3357 				continue;
3358 			}
3359 			for (j = 0; j < sc->max_fw_cmds; j++) {
3360 				mpt_cmd = sc->mpt_cmd_list[j];
3361 				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3362 					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
3363 					/* If not an IOCTL then release the command else re-fire */
3364 					if (!mfi_cmd->sync_cmd) {
3365 						mrsas_release_mfi_cmd(mfi_cmd);
3366 					} else {
3367 						req_desc = mrsas_get_request_desc(sc,
3368 						    mfi_cmd->cmd_id.context.smid - 1);
3369 						mrsas_dprint(sc, MRSAS_OCR,
3370 						    "Re-fire command DCMD opcode 0x%x index %d\n ",
3371 						    mfi_cmd->frame->dcmd.opcode, j);
3372 						if (!req_desc)
3373 							device_printf(sc->mrsas_dev,
3374 							    "Cannot build MPT cmd.\n");
3375 						else
3376 							mrsas_fire_cmd(sc, req_desc->addr.u.low,
3377 							    req_desc->addr.u.high);
3378 					}
3379 				}
3380 			}
3381 
3382 			/* Reset load balance info */
3383 			memset(sc->load_balance_info, 0,
3384 			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
3385 
3386 			if (mrsas_get_ctrl_info(sc)) {
3387 				mrsas_kill_hba(sc);
3388 				retval = FAIL;
3389 				goto out;
3390 			}
3391 			if (!mrsas_get_map_info(sc))
3392 				mrsas_sync_map_info(sc);
3393 
3394 			megasas_setup_jbod_map(sc);
3395 
3396 			if ((sc->is_ventura || sc->is_aero) && sc->streamDetectByLD) {
3397 				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
3398 					memset(sc->streamDetectByLD[i], 0, sizeof(LD_STREAM_DETECT));
3399 					sc->streamDetectByLD[i]->mruBitMap = MR_STREAM_BITMAP;
3400 				}
3401 			}
3402 
3403 			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3404 			mrsas_enable_intr(sc);
3405 			sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3406 
3407 			/* Register AEN with FW for last sequence number */
3408 			class_locale.members.reserved = 0;
3409 			class_locale.members.locale = MR_EVT_LOCALE_ALL;
3410 			class_locale.members.class = MR_EVT_CLASS_DEBUG;
3411 
3412 			mtx_unlock(&sc->sim_lock);
3413 			if (mrsas_register_aen(sc, sc->last_seq_num,
3414 			    class_locale.word)) {
3415 				device_printf(sc->mrsas_dev,
3416 				    "ERROR: AEN registration FAILED from OCR !!! "
3417 				    "Further events from the controller cannot be notified."
3418 				    "Either there is some problem in the controller"
3419 				    "or the controller does not support AEN.\n"
3420 				    "Please contact to the SUPPORT TEAM if the problem persists\n");
3421 			}
3422 			mtx_lock(&sc->sim_lock);
3423 
3424 			/* Adapter reset completed successfully */
3425 			device_printf(sc->mrsas_dev, "Reset successful\n");
3426 			retval = SUCCESS;
3427 			goto out;
3428 		}
3429 		/* Reset failed, kill the adapter */
3430 		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
3431 		mrsas_kill_hba(sc);
3432 		retval = FAIL;
3433 	} else {
3434 		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3435 		mrsas_enable_intr(sc);
3436 		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
3437 	}
3438 out:
3439 	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
3440 	mrsas_dprint(sc, MRSAS_OCR,
3441 	    "Reset Exit with %d.\n", retval);
3442 	return retval;
3443 }
3444 
3445 /*
3446  * mrsas_kill_hba:	Kill HBA when OCR is not supported
3447  * input:			Adapter Context.
3448  *
3449  * This function will kill HBA when OCR is not supported.
3450  */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter dead first so no new work is accepted. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* Give in-flight register accesses one second to settle. */
	DELAY(1000 * 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	/* Ask the firmware to stop the adapter. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail any IOCTLs still waiting on the now-dead controller. */
	mrsas_complete_outstanding_ioctls(sc);
}
3463 
3464 /**
3465  * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
3466  * input:			Controller softc
3467  *
3468  * Returns void
3469  */
3470 void
mrsas_complete_outstanding_ioctls(struct mrsas_softc * sc)3471 mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
3472 {
3473 	int i;
3474 	struct mrsas_mpt_cmd *cmd_mpt;
3475 	struct mrsas_mfi_cmd *cmd_mfi;
3476 	u_int32_t count, MSIxIndex;
3477 
3478 	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
3479 	for (i = 0; i < sc->max_fw_cmds; i++) {
3480 		cmd_mpt = sc->mpt_cmd_list[i];
3481 
3482 		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
3483 			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
3484 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
3485 				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
3486 					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
3487 					    cmd_mpt->io_request->RaidContext.raid_context.status);
3488 			}
3489 		}
3490 	}
3491 }
3492 
3493 /*
3494  * mrsas_wait_for_outstanding:	Wait for outstanding commands
3495  * input:						Adapter Context.
3496  *
3497  * This function will wait for 180 seconds for outstanding commands to be
3498  * completed.
3499  */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
	int i, outstanding, retval = 0;
	u_int32_t fw_state, count, MSIxIndex;

	/* Poll once per second, up to MRSAS_RESET_WAIT_TIME seconds. */
	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Driver remove or shutdown called.\n");
			retval = 1;
			goto out;
		}
		/* Check if firmware is in fault state */
		fw_state = mrsas_read_reg_with_retries(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad)) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Found FW in FAULT state, will reset adapter.\n");
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			/* Drop sim_lock while draining completions on each queue. */
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
			retval = 1;
			goto out;
		}
		/* A DCMD timeout forces a reset without further waiting. */
		if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "DCMD IO TIMEOUT detected, will reset adapter.\n");
			retval = 1;
			goto out;
		}
		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
		if (!outstanding)
			goto out;

		/* Periodically report progress and reap completed commands. */
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
			    "commands to complete\n", i, outstanding);
			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
			mtx_unlock(&sc->sim_lock);
			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
				mrsas_complete_cmd(sc, MSIxIndex);
			mtx_lock(&sc->sim_lock);
		}
		DELAY(1000 * 1000);
	}

	/* Non-zero return tells the caller a chip reset is required. */
	if (mrsas_atomic_read(&sc->fw_outstanding)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    " pending commands remain after waiting,"
		    " will reset adapter.\n");
		retval = 1;
	}
out:
	return retval;
}
3558 
3559 /*
3560  * mrsas_release_mfi_cmd:	Return a cmd to free command pool
3561  * input:					Command packet for return to free cmd pool
3562  *
3563  * This function returns the MFI & MPT command to the command list.
3564  */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
{
	struct mrsas_softc *sc = cmd_mfi->sc;
	struct mrsas_mpt_cmd *cmd_mpt;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	/*
	 * Release the MPT command associated with this MFI command, if one
	 * was allocated (a zero smid means there is none).  Lock order is
	 * mfi_cmd_pool_lock -> mpt_cmd_pool_lock.
	 */
	if (cmd_mfi->cmd_id.context.smid) {
		mtx_lock(&sc->mpt_cmd_pool_lock);
		/* Get the mpt cmd from mfi cmd frame's smid value */
		/* SMIDs are 1-based; the command list is 0-based. */
		cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
		cmd_mpt->flags = 0;
		cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
		mtx_unlock(&sc->mpt_cmd_pool_lock);
	}
	/* Release the mfi command */
	cmd_mfi->ccb_ptr = NULL;
	cmd_mfi->cmd_id.frame_count = 0;
	TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}
3593 
3594 /*
3595  * mrsas_get_controller_info:	Returns FW's controller structure
3596  * input:						Adapter soft state
3597  * 								Controller information structure
3598  *
3599  * Issues an internal command (DCMD) to get the FW's controller structure. This
3600  * information is mainly used to find out the maximum IO transfer per command
3601  * supported by the FW.
3602  */
3603 static int
mrsas_get_ctrl_info(struct mrsas_softc * sc)3604 mrsas_get_ctrl_info(struct mrsas_softc *sc)
3605 {
3606 	int retcode = 0;
3607 	u_int8_t do_ocr = 1;
3608 	struct mrsas_mfi_cmd *cmd;
3609 	struct mrsas_dcmd_frame *dcmd;
3610 
3611 	cmd = mrsas_get_mfi_cmd(sc);
3612 
3613 	if (!cmd) {
3614 		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
3615 		return -ENOMEM;
3616 	}
3617 	dcmd = &cmd->frame->dcmd;
3618 
3619 	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
3620 		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
3621 		mrsas_release_mfi_cmd(cmd);
3622 		return -ENOMEM;
3623 	}
3624 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3625 
3626 	dcmd->cmd = MFI_CMD_DCMD;
3627 	dcmd->cmd_status = 0xFF;
3628 	dcmd->sge_count = 1;
3629 	dcmd->flags = MFI_FRAME_DIR_READ;
3630 	dcmd->timeout = 0;
3631 	dcmd->pad_0 = 0;
3632 	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_ctrl_info));
3633 	dcmd->opcode = htole32(MR_DCMD_CTRL_GET_INFO);
3634 	dcmd->sgl.sge32[0].phys_addr = htole32(sc->ctlr_info_phys_addr & 0xFFFFFFFF);
3635 	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_ctrl_info));
3636 
3637 	if (!sc->mask_interrupts)
3638 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
3639 	else
3640 		retcode = mrsas_issue_polled(sc, cmd);
3641 
3642 	if (retcode == ETIMEDOUT)
3643 		goto dcmd_timeout;
3644 	else {
3645 		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
3646 		le32_to_cpus(&sc->ctrl_info->properties.OnOffProperties);
3647 		le32_to_cpus(&sc->ctrl_info->adapterOperations2);
3648 		le32_to_cpus(&sc->ctrl_info->adapterOperations3);
3649 		le16_to_cpus(&sc->ctrl_info->adapterOperations4);
3650 	}
3651 
3652 	do_ocr = 0;
3653 	mrsas_update_ext_vd_details(sc);
3654 
3655 	sc->use_seqnum_jbod_fp =
3656 	    sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
3657 	sc->support_morethan256jbod =
3658 		sc->ctrl_info->adapterOperations4.supportPdMapTargetId;
3659 
3660 	sc->disableOnlineCtrlReset =
3661 	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
3662 
3663 dcmd_timeout:
3664 	mrsas_free_ctlr_info_cmd(sc);
3665 
3666 	if (do_ocr)
3667 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
3668 
3669 	if (!sc->mask_interrupts)
3670 		mrsas_release_mfi_cmd(cmd);
3671 
3672 	return (retcode);
3673 }
3674 
3675 /*
3676  * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3677  * input:
3678  *	sc - Controller's softc
3679 */
3680 static void
mrsas_update_ext_vd_details(struct mrsas_softc * sc)3681 mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3682 {
3683 	u_int32_t ventura_map_sz = 0;
3684 	sc->max256vdSupport =
3685 		sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3686 
3687 	/* Below is additional check to address future FW enhancement */
3688 	if (sc->ctrl_info->max_lds > 64)
3689 		sc->max256vdSupport = 1;
3690 
3691 	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3692 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3693 	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3694 	    * MRSAS_MAX_DEV_PER_CHANNEL;
3695 	if (sc->max256vdSupport) {
3696 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3697 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3698 	} else {
3699 		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3700 		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3701 	}
3702 
3703 	if (sc->maxRaidMapSize) {
3704 		ventura_map_sz = sc->maxRaidMapSize *
3705 		    MR_MIN_MAP_SIZE;
3706 		sc->current_map_sz = ventura_map_sz;
3707 		sc->max_map_sz = ventura_map_sz;
3708 	} else {
3709 		sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3710 		    (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1));
3711 		sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3712 		sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3713 		if (sc->max256vdSupport)
3714 			sc->current_map_sz = sc->new_map_sz;
3715 		else
3716 			sc->current_map_sz = sc->old_map_sz;
3717 	}
3718 
3719 	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP_ALL);
3720 #if VD_EXT_DEBUG
3721 	device_printf(sc->mrsas_dev, "sc->maxRaidMapSize 0x%x \n",
3722 	    sc->maxRaidMapSize);
3723 	device_printf(sc->mrsas_dev,
3724 	    "new_map_sz = 0x%x, old_map_sz = 0x%x, "
3725 	    "ventura_map_sz = 0x%x, current_map_sz = 0x%x "
3726 	    "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx \n",
3727 	    sc->new_map_sz, sc->old_map_sz, ventura_map_sz,
3728 	    sc->current_map_sz, sc->drv_map_sz, sizeof(MR_DRV_RAID_MAP_ALL));
3729 #endif
3730 }
3731 
3732 /*
3733  * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3734  * input:						Adapter soft state
3735  *
3736  * Allocates DMAable memory for the controller info internal command.
3737  */
3738 int
mrsas_alloc_ctlr_info_cmd(struct mrsas_softc * sc)3739 mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3740 {
3741 	int ctlr_info_size;
3742 
3743 	/* Allocate get controller info command */
3744 	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3745 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3746 	    1, 0,
3747 	    BUS_SPACE_MAXADDR_32BIT,
3748 	    BUS_SPACE_MAXADDR,
3749 	    NULL, NULL,
3750 	    ctlr_info_size,
3751 	    1,
3752 	    ctlr_info_size,
3753 	    BUS_DMA_ALLOCNOW,
3754 	    NULL, NULL,
3755 	    &sc->ctlr_info_tag)) {
3756 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3757 		return (ENOMEM);
3758 	}
3759 	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3760 	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3761 		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3762 		return (ENOMEM);
3763 	}
3764 	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3765 	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3766 	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3767 		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3768 		return (ENOMEM);
3769 	}
3770 	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3771 	return (0);
3772 }
3773 
3774 /*
3775  * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3776  * input:						Adapter soft state
3777  *
3778  * Deallocates memory of the get controller info cmd.
3779  */
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
	/*
	 * Tear down in reverse order of mrsas_alloc_ctlr_info_cmd().
	 * NOTE(review): the softc fields are not cleared afterwards, so a
	 * second call without an intervening alloc would double-free --
	 * callers must pair alloc/free exactly once.
	 */
	if (sc->ctlr_info_phys_addr)
		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_mem != NULL)
		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_tag != NULL)
		bus_dma_tag_destroy(sc->ctlr_info_tag);
}
3790 
3791 /*
3792  * mrsas_issue_polled:	Issues a polling command
3793  * inputs:				Adapter soft state
3794  * 						Command packet to be issued
3795  *
3796  * This function is for posting of internal commands to Firmware.  MFI requires
3797  * the cmd_status to be set to 0xFF before posting.  The maximun wait time of
3798  * the poll response timer is 180 seconds.
3799  */
3800 int
mrsas_issue_polled(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)3801 mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3802 {
3803 	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3804 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3805 	int i, retcode = SUCCESS;
3806 
3807 	frame_hdr->cmd_status = 0xFF;
3808 	frame_hdr->flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
3809 
3810 	/* Issue the frame using inbound queue port */
3811 	if (mrsas_issue_dcmd(sc, cmd)) {
3812 		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3813 		return (1);
3814 	}
3815 	/*
3816 	 * Poll response timer to wait for Firmware response.  While this
3817 	 * timer with the DELAY call could block CPU, the time interval for
3818 	 * this is only 1 millisecond.
3819 	 */
3820 	if (frame_hdr->cmd_status == 0xFF) {
3821 		for (i = 0; i < (max_wait * 1000); i++) {
3822 			if (frame_hdr->cmd_status == 0xFF)
3823 				DELAY(1000);
3824 			else
3825 				break;
3826 		}
3827 	}
3828 	if (frame_hdr->cmd_status == 0xFF) {
3829 		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
3830 		    "seconds from %s\n", max_wait, __func__);
3831 		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
3832 		    cmd->frame->dcmd.opcode);
3833 		retcode = ETIMEDOUT;
3834 	}
3835 	return (retcode);
3836 }
3837 
3838 /*
3839  * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3840  * input:				Adapter soft state mfi cmd pointer
3841  *
3842  * This function is called by mrsas_issued_blocked_cmd() and
3843  * mrsas_issued_polled(), to build the MPT command and then fire the command
3844  * to Firmware.
3845  */
3846 int
mrsas_issue_dcmd(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)3847 mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3848 {
3849 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3850 
3851 	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3852 	if (!req_desc) {
3853 		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3854 		return (1);
3855 	}
3856 	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3857 
3858 	return (0);
3859 }
3860 
3861 /*
3862  * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3863  * input:				Adapter soft state mfi cmd to build
3864  *
3865  * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3866  * command and prepares the MPT command to send to Firmware.
3867  */
3868 MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_build_mpt_cmd(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)3869 mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3870 {
3871 	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3872 	u_int16_t index;
3873 
3874 	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3875 		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3876 		return NULL;
3877 	}
3878 	index = cmd->cmd_id.context.smid;
3879 
3880 	req_desc = mrsas_get_request_desc(sc, index - 1);
3881 	if (!req_desc)
3882 		return NULL;
3883 
3884 	req_desc->addr.Words = 0;
3885 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3886 
3887 	req_desc->SCSIIO.SMID = htole16(index);
3888 
3889 	return (req_desc);
3890 }
3891 
3892 /*
3893  * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3894  * input:						Adapter soft state mfi cmd pointer
3895  *
3896  * The MPT command and the io_request are setup as a passthru command. The SGE
3897  * chain address is set to frame_phys_addr of the MFI command.
3898  */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);	/* no free MPT command available */

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link so the completion path can find the MFI cmd again. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		/* Clear the flags of the last SGE slot in the main message. */
		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* The chain SGE points at the DMA address of the MFI frame itself. */
	mpi25_ieee_chain->Address = htole64(mfi_cmd->frame_phys_addr);

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = htole32(sc->max_chain_frame_sz);

	return (0);
}
3948 
3949 /*
3950  * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3951  * input:					Adapter soft state Command to be issued
3952  *
3953  * This function waits on an event for the command to be returned from the ISR.
3954  * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3955  * internal and ioctl commands.
3956  */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = SUCCESS;

	/* Initialize cmd_status */
	cmd->cmd_status = 0xFF;	/* 0xFF = still owned by FW */

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): the value stored in sc->chan is never read back;
	 * the sleep/wakeup channel actually used is the address &sc->chan
	 * itself (see tsleep() below and wakeup_one() in mrsas_wakeup()).
	 */
	sc->chan = (void *)&cmd;

	/* Sleep in ~1 second (hz tick) slices until completion or timeout. */
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;

		if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
			/* Each tsleep() above waits at most ~1 second. */
			total_time++;
			if (total_time >= max_wait) {
				device_printf(sc->mrsas_dev,
				    "Internal command timed out after %d seconds.\n", max_wait);
				retcode = 1;
				break;
			}
		}
	}
	sc->chan = NULL;

	/* Status still untouched: FW never completed the DCMD. */
	if (cmd->cmd_status == 0xFF) {
		device_printf(sc->mrsas_dev, "DCMD timed out after %d "
		    "seconds from %s\n", max_wait, __func__);
		device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
		    cmd->frame->dcmd.opcode);
		retcode = ETIMEDOUT;
	}
	return (retcode);
}
4002 
4003 /*
4004  * mrsas_complete_mptmfi_passthru:	Completes a command
4005  * input:	@sc:					Adapter soft state
4006  * 			@cmd:					Command to be completed
4007  * 			@status:				cmd completion status
4008  *
4009  * This function is called from mrsas_complete_cmd() after an interrupt is
4010  * received from Firmware, and io_request->Function is
4011  * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
4012  */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	struct mrsas_header *hdr = &cmd->frame->hdr;
	/*
	 * Completion status as written back into the MFI frame by FW.
	 * NOTE(review): the 'status' parameter is unused -- this function
	 * only consults the frame's own cmd_status; confirm intent.
	 */
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH -- non-IOCTL passthru handled like DCMD below */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			sc->map_update_cmd = NULL;
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			/* Re-issue the async sync-map-info DCMD to FW. */
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* FW has an updated PD sequence */
		if ((cmd->frame->dcmd.opcode ==
		    MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[0] == 1)) {
			mtx_lock(&sc->raidmap_lock);
			sc->jbod_seq_cmd = NULL;
			mrsas_release_mfi_cmd(cmd);

			if (cmd_status == MFI_STAT_OK) {
				sc->pd_seq_map_id++;
				/* Re-register a pd sync seq num cmd */
				if (megasas_sync_pd_seq_num(sc, true))
					sc->use_seqnum_jbod_fp = 0;
			} else {
				sc->use_seqnum_jbod_fp = 0;
				device_printf(sc->mrsas_dev,
				    "Jbod map sync failed, status=%x\n", cmd_status);
			}
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		/* See if got an event notification */
		/*
		 * NOTE(review): only this comparison byte-swaps the opcode
		 * with le32toh(); the earlier ones above compare raw.
		 * Harmless on little-endian hosts -- confirm intent.
		 */
		if (le32toh(cmd->frame->dcmd.opcode) == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
4110 
4111 /*
4112  * mrsas_wakeup:	Completes an internal command
4113  * input:			Adapter soft state
4114  * 					Command to be completed
4115  *
4116  * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
4117  * timer is started.  This function is called from
4118  * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
4119  * from the command wait.
4120  */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/* Latch the FW completion status into the command. */
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* 0xFF is the "not completed" sentinel; map it to success here. */
	if (cmd->cmd_status == 0xFF)
		cmd->cmd_status = 0;

	/*
	 * NOTE(review): the store into sc->chan is never read back; the
	 * wakeup channel actually used is the address &sc->chan itself,
	 * matching the tsleep() in mrsas_issue_blocked_cmd().
	 */
	sc->chan = (void *)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}
4133 
4134 /*
4135  * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
4136  * Adapter soft state Shutdown/Hibernate
4137  *
4138  * This function issues a DCMD internal command to Firmware to initiate shutdown
4139  * of the controller.
4140  */
4141 static void
mrsas_shutdown_ctlr(struct mrsas_softc * sc,u_int32_t opcode)4142 mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
4143 {
4144 	struct mrsas_mfi_cmd *cmd;
4145 	struct mrsas_dcmd_frame *dcmd;
4146 
4147 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4148 		return;
4149 
4150 	cmd = mrsas_get_mfi_cmd(sc);
4151 	if (!cmd) {
4152 		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
4153 		return;
4154 	}
4155 	if (sc->aen_cmd)
4156 		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
4157 	if (sc->map_update_cmd)
4158 		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
4159 	if (sc->jbod_seq_cmd)
4160 		mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
4161 
4162 	dcmd = &cmd->frame->dcmd;
4163 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4164 
4165 	dcmd->cmd = MFI_CMD_DCMD;
4166 	dcmd->cmd_status = 0x0;
4167 	dcmd->sge_count = 0;
4168 	dcmd->flags = MFI_FRAME_DIR_NONE;
4169 	dcmd->timeout = 0;
4170 	dcmd->pad_0 = 0;
4171 	dcmd->data_xfer_len = 0;
4172 	dcmd->opcode = opcode;
4173 
4174 	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
4175 
4176 	mrsas_issue_blocked_cmd(sc, cmd);
4177 	mrsas_release_mfi_cmd(cmd);
4178 
4179 	return;
4180 }
4181 
4182 /*
4183  * mrsas_flush_cache:         Requests FW to flush all its caches input:
4184  * Adapter soft state
4185  *
4186  * This function is issues a DCMD internal command to Firmware to initiate
4187  * flushing of all caches.
4188  */
4189 static void
mrsas_flush_cache(struct mrsas_softc * sc)4190 mrsas_flush_cache(struct mrsas_softc *sc)
4191 {
4192 	struct mrsas_mfi_cmd *cmd;
4193 	struct mrsas_dcmd_frame *dcmd;
4194 
4195 	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
4196 		return;
4197 
4198 	cmd = mrsas_get_mfi_cmd(sc);
4199 	if (!cmd) {
4200 		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
4201 		return;
4202 	}
4203 	dcmd = &cmd->frame->dcmd;
4204 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4205 
4206 	dcmd->cmd = MFI_CMD_DCMD;
4207 	dcmd->cmd_status = 0x0;
4208 	dcmd->sge_count = 0;
4209 	dcmd->flags = MFI_FRAME_DIR_NONE;
4210 	dcmd->timeout = 0;
4211 	dcmd->pad_0 = 0;
4212 	dcmd->data_xfer_len = 0;
4213 	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
4214 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
4215 
4216 	mrsas_issue_blocked_cmd(sc, cmd);
4217 	mrsas_release_mfi_cmd(cmd);
4218 
4219 	return;
4220 }
4221 
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
	int retcode = 0;
	u_int8_t do_ocr = 1;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t pd_seq_map_sz;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
	bus_addr_t pd_seq_h;

	/* Map covers MAX_PHYSICAL_DEVICES entries; one is inline in the struct. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) *
	    (MAX_PHYSICAL_DEVICES - 1));

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	/* Double-buffered by pd_seq_map_id parity. */
	pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
	pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
	if (!pd_sync) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for jbod map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(pd_sync, 0, pd_seq_map_sz);
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* FW overwrites on completion */
	dcmd->sge_count = 1;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(pd_seq_map_sz);
	dcmd->opcode = htole32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(pd_seq_h & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(pd_seq_map_sz);

	if (pend) {
		/*
		 * Async registration: the cmd stays owned by sc->jbod_seq_cmd
		 * and is completed later by mrsas_complete_mptmfi_passthru().
		 */
		dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
		dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
		sc->jbod_seq_cmd = cmd;
		if (mrsas_issue_dcmd(sc, cmd)) {
			device_printf(sc->mrsas_dev,
			    "Fail to send sync map info command.\n");
			return 1;
		} else
			return 0;
	} else
		dcmd->flags = htole16(MFI_FRAME_DIR_READ);

	/*
	 * NOTE(review): in this polled path the MFI cmd is never released
	 * before returning -- looks like a leak; confirm against the
	 * completion/reset paths before changing.
	 */
	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	/* Sanity-check the FW-reported JBOD count against our limit. */
	if (le32toh(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
		device_printf(sc->mrsas_dev,
		    "driver supports max %d JBOD, but FW reports %d\n",
		    MAX_PHYSICAL_DEVICES, pd_sync->count);
		retcode = -EINVAL;
	}
	if (!retcode)
		sc->pd_seq_map_id++;
	do_ocr = 0;

dcmd_timeout:
	/* A timed-out DCMD schedules an online controller reset (OCR). */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}
4298 
4299 /*
4300  * mrsas_get_map_info:        Load and validate RAID map input:
4301  * Adapter instance soft state
4302  *
4303  * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
4304  * and validate RAID map.  It returns 0 if successful, 1 other- wise.
4305  */
4306 static int
mrsas_get_map_info(struct mrsas_softc * sc)4307 mrsas_get_map_info(struct mrsas_softc *sc)
4308 {
4309 	uint8_t retcode = 0;
4310 
4311 	sc->fast_path_io = 0;
4312 	if (!mrsas_get_ld_map_info(sc)) {
4313 		retcode = MR_ValidateMapInfo(sc);
4314 		if (retcode == 0) {
4315 			sc->fast_path_io = 1;
4316 			return 0;
4317 		}
4318 	}
4319 	return 1;
4320 }
4321 
4322 /*
4323  * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
4324  * Adapter instance soft state
4325  *
4326  * Issues an internal command (DCMD) to get the FW's controller PD list
4327  * structure.
4328  */
4329 static int
mrsas_get_ld_map_info(struct mrsas_softc * sc)4330 mrsas_get_ld_map_info(struct mrsas_softc *sc)
4331 {
4332 	int retcode = 0;
4333 	struct mrsas_mfi_cmd *cmd;
4334 	struct mrsas_dcmd_frame *dcmd;
4335 	void *map;
4336 	bus_addr_t map_phys_addr = 0;
4337 
4338 	cmd = mrsas_get_mfi_cmd(sc);
4339 	if (!cmd) {
4340 		device_printf(sc->mrsas_dev,
4341 		    "Cannot alloc for ld map info cmd.\n");
4342 		return 1;
4343 	}
4344 	dcmd = &cmd->frame->dcmd;
4345 
4346 	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
4347 	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
4348 	if (!map) {
4349 		device_printf(sc->mrsas_dev,
4350 		    "Failed to alloc mem for ld map info.\n");
4351 		mrsas_release_mfi_cmd(cmd);
4352 		return (ENOMEM);
4353 	}
4354 	memset(map, 0, sizeof(sc->max_map_sz));
4355 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4356 
4357 	dcmd->cmd = MFI_CMD_DCMD;
4358 	dcmd->cmd_status = 0xFF;
4359 	dcmd->sge_count = 1;
4360 	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4361 	dcmd->timeout = 0;
4362 	dcmd->pad_0 = 0;
4363 	dcmd->data_xfer_len = htole32(sc->current_map_sz);
4364 	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
4365 	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
4366 	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);
4367 
4368 	retcode = mrsas_issue_polled(sc, cmd);
4369 	if (retcode == ETIMEDOUT)
4370 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4371 
4372 	return (retcode);
4373 }
4374 
4375 /*
4376  * mrsas_sync_map_info:        Get FW's ld_map structure input:
4377  * Adapter instance soft state
4378  *
4379  * Issues an internal command (DCMD) to get the FW's controller PD list
4380  * structure.
4381  */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
	int retcode = 0, i;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	uint32_t num_lds;
	MR_LD_TARGET_SYNC *target_map = NULL;
	MR_DRV_RAID_MAP_ALL *map;
	MR_LD_RAID *raid;
	MR_LD_TARGET_SYNC *ld_sync;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
		return ENOMEM;
	}
	/* Source: the driver map currently in use (map_id parity). */
	map = sc->ld_drv_map[sc->map_id & 1];
	num_lds = map->raidMap.ldCount;

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Destination: the *other* raid map buffer, reused as a
	 * target-sync array that is written to FW. */
	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
	memset(target_map, 0, sc->max_map_sz);

	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

	ld_sync = (MR_LD_TARGET_SYNC *) target_map;

	/* One entry per LD: target id plus its current sequence number. */
	for (i = 0; i < num_lds; i++, ld_sync++) {
		raid = MR_LdRaidGet(i, map);
		ld_sync->targetId = MR_GetLDTgtId(i, map);
		ld_sync->seqNum = raid->seqNum;
	}

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* FW overwrites on completion */
	dcmd->sge_count = 1;
	dcmd->flags = htole16(MFI_FRAME_DIR_WRITE);
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = htole32(sc->current_map_sz);
	dcmd->mbox.b[0] = num_lds;
	/* PEND flag registers this as an async DCMD completed by FW later. */
	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
	dcmd->opcode = htole32(MR_DCMD_LD_MAP_GET_INFO);
	dcmd->sgl.sge32[0].phys_addr = htole32(map_phys_addr & 0xFFFFFFFF);
	dcmd->sgl.sge32[0].length = htole32(sc->current_map_sz);

	/*
	 * Ownership passes to sc->map_update_cmd; the completion handler
	 * (mrsas_complete_mptmfi_passthru) releases it.
	 * NOTE(review): on issue failure the cmd is not released here and
	 * map_update_cmd keeps pointing at it -- confirm intent.
	 */
	sc->map_update_cmd = cmd;
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev,
		    "Fail to send sync map info command.\n");
		return (1);
	}
	return (retcode);
}
4440 
4441 /* Input:	dcmd.opcode		- MR_DCMD_PD_GET_INFO
4442   *		dcmd.mbox.s[0]		- deviceId for this physical drive
4443   *		dcmd.sge IN		- ptr to returned MR_PD_INFO structure
4444   * Desc:	Firmware return the physical drive info structure
4445   *
4446   */
4447 static void
mrsas_get_pd_info(struct mrsas_softc * sc,u_int16_t device_id)4448 mrsas_get_pd_info(struct mrsas_softc *sc, u_int16_t device_id)
4449 {
4450 	int retcode;
4451 	u_int8_t do_ocr = 1;
4452 	struct mrsas_mfi_cmd *cmd;
4453 	struct mrsas_dcmd_frame *dcmd;
4454 
4455 	cmd = mrsas_get_mfi_cmd(sc);
4456 
4457 	if (!cmd) {
4458 		device_printf(sc->mrsas_dev,
4459 		    "Cannot alloc for get PD info cmd\n");
4460 		return;
4461 	}
4462 	dcmd = &cmd->frame->dcmd;
4463 
4464 	memset(sc->pd_info_mem, 0, sizeof(struct mrsas_pd_info));
4465 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4466 
4467 	dcmd->mbox.s[0] = htole16(device_id);
4468 	dcmd->cmd = MFI_CMD_DCMD;
4469 	dcmd->cmd_status = 0xFF;
4470 	dcmd->sge_count = 1;
4471 	dcmd->flags = MFI_FRAME_DIR_READ;
4472 	dcmd->timeout = 0;
4473 	dcmd->pad_0 = 0;
4474 	dcmd->data_xfer_len = htole32(sizeof(struct mrsas_pd_info));
4475 	dcmd->opcode = htole32(MR_DCMD_PD_GET_INFO);
4476 	dcmd->sgl.sge32[0].phys_addr = htole32((u_int32_t)sc->pd_info_phys_addr & 0xFFFFFFFF);
4477 	dcmd->sgl.sge32[0].length = htole32(sizeof(struct mrsas_pd_info));
4478 
4479 	if (!sc->mask_interrupts)
4480 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4481 	else
4482 		retcode = mrsas_issue_polled(sc, cmd);
4483 
4484 	if (retcode == ETIMEDOUT)
4485 		goto dcmd_timeout;
4486 
4487 	sc->target_list[device_id].interface_type =
4488 		le16toh(sc->pd_info_mem->state.ddf.pdType.intf);
4489 
4490 	do_ocr = 0;
4491 
4492 dcmd_timeout:
4493 
4494 	if (do_ocr)
4495 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4496 
4497 	if (!sc->mask_interrupts)
4498 		mrsas_release_mfi_cmd(cmd);
4499 }
4500 
4501 /*
4502  * mrsas_add_target:				Add target ID of system PD/VD to driver's data structure.
4503  * sc:						Adapter's soft state
4504  * target_id:					Unique target id per controller(managed by driver)
4505  *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4506  *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4507  * return:					void
4508  * Descripton:					This function will be called whenever system PD or VD is created.
4509  */
static void mrsas_add_target(struct mrsas_softc *sc,
	u_int16_t target_id)
{
	const char *kind;
	u_int16_t display_id;

	/* Record the target as live in the driver's target table. */
	sc->target_list[target_id].target_id = target_id;

	/* PDs occupy IDs [0, MRSAS_MAX_PD); VDs are offset above that. */
	if (target_id < MRSAS_MAX_PD) {
		kind = "System PD";
		display_id = target_id;
	} else {
		kind = "VD";
		display_id = target_id - MRSAS_MAX_PD;
	}
	device_printf(sc->mrsas_dev,
		"%s created target ID: 0x%x\n", kind, display_id);

	/*
	 * Fire the pd_info DCMD only for system PDs, and only once
	 * interrupts are enabled and the pd_info buffer exists.
	 */
	if (target_id < MRSAS_MAX_PD && sc->pd_info_mem != NULL &&
	    !sc->mask_interrupts)
		mrsas_get_pd_info(sc, target_id);

}
4528 
4529 /*
4530  * mrsas_remove_target:			Remove target ID of system PD/VD from driver's data structure.
4531  * sc:						Adapter's soft state
4532  * target_id:					Unique target id per controller(managed by driver)
4533  *						for system PDs- target ID ranges from 0 to (MRSAS_MAX_PD - 1)
4534  *						for VDs- target ID ranges from MRSAS_MAX_PD to MRSAS_MAX_TM_TARGETS
4535  * return:					void
 * Description:					This function will be called whenever system PD or VD is deleted
4537  */
static void mrsas_remove_target(struct mrsas_softc *sc,
	u_int16_t target_id)
{
	const char *kind;
	u_int16_t display_id;

	/* 0xffff marks the slot as "no target". */
	sc->target_list[target_id].target_id = 0xffff;

	/* PDs occupy IDs [0, MRSAS_MAX_PD); VDs are offset above that. */
	if (target_id < MRSAS_MAX_PD) {
		kind = "System PD";
		display_id = target_id;
	} else {
		kind = "VD";
		display_id = target_id - MRSAS_MAX_PD;
	}
	device_printf(sc->mrsas_dev,
		"%s deleted target ID: 0x%x\n", kind, display_id);
}
4547 
4548 /*
4549  * mrsas_get_pd_list:           Returns FW's PD list structure input:
4550  * Adapter soft state
4551  *
4552  * Issues an internal command (DCMD) to get the FW's controller PD list
4553  * structure.  This information is mainly used to find out about system
4554  * supported by Firmware.
4555  */
4556 static int
mrsas_get_pd_list(struct mrsas_softc * sc)4557 mrsas_get_pd_list(struct mrsas_softc *sc)
4558 {
4559 	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
4560 	u_int8_t do_ocr = 1;
4561 	struct mrsas_mfi_cmd *cmd;
4562 	struct mrsas_dcmd_frame *dcmd;
4563 	struct MR_PD_LIST *pd_list_mem;
4564 	struct MR_PD_ADDRESS *pd_addr;
4565 	bus_addr_t pd_list_phys_addr = 0;
4566 	struct mrsas_tmp_dcmd *tcmd;
4567 	u_int16_t dev_id;
4568 
4569 	cmd = mrsas_get_mfi_cmd(sc);
4570 	if (!cmd) {
4571 		device_printf(sc->mrsas_dev,
4572 		    "Cannot alloc for get PD list cmd\n");
4573 		return 1;
4574 	}
4575 	dcmd = &cmd->frame->dcmd;
4576 
4577 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4578 	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
4579 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
4580 		device_printf(sc->mrsas_dev,
4581 		    "Cannot alloc dmamap for get PD list cmd\n");
4582 		mrsas_release_mfi_cmd(cmd);
4583 		mrsas_free_tmp_dcmd(tcmd);
4584 		free(tcmd, M_MRSAS);
4585 		return (ENOMEM);
4586 	} else {
4587 		pd_list_mem = tcmd->tmp_dcmd_mem;
4588 		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4589 	}
4590 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4591 
4592 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4593 	dcmd->mbox.b[1] = 0;
4594 	dcmd->cmd = MFI_CMD_DCMD;
4595 	dcmd->cmd_status = 0xFF;
4596 	dcmd->sge_count = 1;
4597 	dcmd->flags = htole16(MFI_FRAME_DIR_READ);
4598 	dcmd->timeout = 0;
4599 	dcmd->pad_0 = 0;
4600 	dcmd->data_xfer_len = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4601 	dcmd->opcode = htole32(MR_DCMD_PD_LIST_QUERY);
4602 	dcmd->sgl.sge32[0].phys_addr = htole32(pd_list_phys_addr & 0xFFFFFFFF);
4603 	dcmd->sgl.sge32[0].length = htole32(MRSAS_MAX_PD * sizeof(struct MR_PD_LIST));
4604 
4605 	if (!sc->mask_interrupts)
4606 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4607 	else
4608 		retcode = mrsas_issue_polled(sc, cmd);
4609 
4610 	if (retcode == ETIMEDOUT)
4611 		goto dcmd_timeout;
4612 
4613 	/* Get the instance PD list */
4614 	pd_count = MRSAS_MAX_PD;
4615 	pd_addr = pd_list_mem->addr;
4616 	if (le32toh(pd_list_mem->count) < pd_count) {
4617 		memset(sc->local_pd_list, 0,
4618 		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
4619 		for (pd_index = 0; pd_index < le32toh(pd_list_mem->count); pd_index++) {
4620 			dev_id = le16toh(pd_addr->deviceId);
4621 			sc->local_pd_list[dev_id].tid = dev_id;
4622 			sc->local_pd_list[dev_id].driveType =
4623 			    le16toh(pd_addr->scsiDevType);
4624 			sc->local_pd_list[dev_id].driveState =
4625 			    MR_PD_STATE_SYSTEM;
4626 			if (sc->target_list[dev_id].target_id == 0xffff)
4627 				mrsas_add_target(sc, dev_id);
4628 			pd_addr++;
4629 		}
4630 		for (pd_index = 0; pd_index < MRSAS_MAX_PD; pd_index++) {
4631 			if ((sc->local_pd_list[pd_index].driveState !=
4632 				MR_PD_STATE_SYSTEM) &&
4633 				(sc->target_list[pd_index].target_id !=
4634 				0xffff)) {
4635 				mrsas_remove_target(sc, pd_index);
4636 			}
4637 		}
4638 		/*
4639 		 * Use mutext/spinlock if pd_list component size increase more than
4640 		 * 32 bit.
4641 		 */
4642 		memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
4643 		do_ocr = 0;
4644 	}
4645 dcmd_timeout:
4646 	mrsas_free_tmp_dcmd(tcmd);
4647 	free(tcmd, M_MRSAS);
4648 
4649 	if (do_ocr)
4650 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4651 
4652 	if (!sc->mask_interrupts)
4653 		mrsas_release_mfi_cmd(cmd);
4654 
4655 	return (retcode);
4656 }
4657 
4658 /*
4659  * mrsas_get_ld_list:           Returns FW's LD list structure input:
4660  * Adapter soft state
4661  *
 * Issues an internal command (DCMD) to get the FW's controller LD list
 * structure.  This information is mainly used to find out which LDs are
 * supported by the FW.
4665  */
4666 static int
mrsas_get_ld_list(struct mrsas_softc * sc)4667 mrsas_get_ld_list(struct mrsas_softc *sc)
4668 {
4669 	int ld_list_size, retcode = 0, ld_index = 0, ids = 0, drv_tgt_id;
4670 	u_int8_t do_ocr = 1;
4671 	struct mrsas_mfi_cmd *cmd;
4672 	struct mrsas_dcmd_frame *dcmd;
4673 	struct MR_LD_LIST *ld_list_mem;
4674 	bus_addr_t ld_list_phys_addr = 0;
4675 	struct mrsas_tmp_dcmd *tcmd;
4676 
4677 	cmd = mrsas_get_mfi_cmd(sc);
4678 	if (!cmd) {
4679 		device_printf(sc->mrsas_dev,
4680 		    "Cannot alloc for get LD list cmd\n");
4681 		return 1;
4682 	}
4683 	dcmd = &cmd->frame->dcmd;
4684 
4685 	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
4686 	ld_list_size = sizeof(struct MR_LD_LIST);
4687 	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
4688 		device_printf(sc->mrsas_dev,
4689 		    "Cannot alloc dmamap for get LD list cmd\n");
4690 		mrsas_release_mfi_cmd(cmd);
4691 		mrsas_free_tmp_dcmd(tcmd);
4692 		free(tcmd, M_MRSAS);
4693 		return (ENOMEM);
4694 	} else {
4695 		ld_list_mem = tcmd->tmp_dcmd_mem;
4696 		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
4697 	}
4698 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4699 
4700 	if (sc->max256vdSupport)
4701 		dcmd->mbox.b[0] = 1;
4702 
4703 	dcmd->cmd = MFI_CMD_DCMD;
4704 	dcmd->cmd_status = 0xFF;
4705 	dcmd->sge_count = 1;
4706 	dcmd->flags = MFI_FRAME_DIR_READ;
4707 	dcmd->timeout = 0;
4708 	dcmd->data_xfer_len = htole32(sizeof(struct MR_LD_LIST));
4709 	dcmd->opcode = htole32(MR_DCMD_LD_GET_LIST);
4710 	dcmd->sgl.sge32[0].phys_addr = htole32(ld_list_phys_addr);
4711 	dcmd->sgl.sge32[0].length = htole32(sizeof(struct MR_LD_LIST));
4712 	dcmd->pad_0 = 0;
4713 
4714 	if (!sc->mask_interrupts)
4715 		retcode = mrsas_issue_blocked_cmd(sc, cmd);
4716 	else
4717 		retcode = mrsas_issue_polled(sc, cmd);
4718 
4719 	if (retcode == ETIMEDOUT)
4720 		goto dcmd_timeout;
4721 
4722 #if VD_EXT_DEBUG
4723 	printf("Number of LDs %d\n", ld_list_mem->ldCount);
4724 #endif
4725 
4726 	/* Get the instance LD list */
4727 	if (le32toh(ld_list_mem->ldCount) <= sc->fw_supported_vd_count) {
4728 		sc->CurLdCount = le32toh(ld_list_mem->ldCount);
4729 		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4730 		for (ld_index = 0; ld_index < le32toh(ld_list_mem->ldCount); ld_index++) {
4731 			ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4732 			drv_tgt_id = ids + MRSAS_MAX_PD;
4733 			if (ld_list_mem->ldList[ld_index].state != 0) {
4734 				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
4735 				if (sc->target_list[drv_tgt_id].target_id ==
4736 					0xffff)
4737 					mrsas_add_target(sc, drv_tgt_id);
4738 			} else {
4739 				if (sc->target_list[drv_tgt_id].target_id !=
4740 					0xffff)
4741 					mrsas_remove_target(sc,
4742 						drv_tgt_id);
4743 			}
4744 		}
4745 
4746 		do_ocr = 0;
4747 	}
4748 dcmd_timeout:
4749 	mrsas_free_tmp_dcmd(tcmd);
4750 	free(tcmd, M_MRSAS);
4751 
4752 	if (do_ocr)
4753 		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
4754 	if (!sc->mask_interrupts)
4755 		mrsas_release_mfi_cmd(cmd);
4756 
4757 	return (retcode);
4758 }
4759 
4760 /*
4761  * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
4762  * Adapter soft state Temp command Size of allocation
4763  *
4764  * Allocates DMAable memory for a temporary internal command. The allocated
4765  * memory is initialized to all zeros upon successful loading of the dma
4766  * mapped memory.
4767  */
4768 int
mrsas_alloc_tmp_dcmd(struct mrsas_softc * sc,struct mrsas_tmp_dcmd * tcmd,int size)4769 mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
4770     struct mrsas_tmp_dcmd *tcmd, int size)
4771 {
4772 	if (bus_dma_tag_create(sc->mrsas_parent_tag,
4773 	    1, 0,
4774 	    BUS_SPACE_MAXADDR_32BIT,
4775 	    BUS_SPACE_MAXADDR,
4776 	    NULL, NULL,
4777 	    size,
4778 	    1,
4779 	    size,
4780 	    BUS_DMA_ALLOCNOW,
4781 	    NULL, NULL,
4782 	    &tcmd->tmp_dcmd_tag)) {
4783 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
4784 		return (ENOMEM);
4785 	}
4786 	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
4787 	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
4788 		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
4789 		return (ENOMEM);
4790 	}
4791 	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
4792 	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
4793 	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
4794 		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
4795 		return (ENOMEM);
4796 	}
4797 	memset(tcmd->tmp_dcmd_mem, 0, size);
4798 	return (0);
4799 }
4800 
4801 /*
4802  * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
4803  * temporary dcmd pointer
4804  *
4805  * Deallocates memory of the temporary command for use in the construction of
4806  * the internal DCMD.
4807  */
void
mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
{
	/*
	 * Teardown mirrors mrsas_alloc_tmp_dcmd() in reverse order: unload
	 * the DMA map, free the DMA memory, destroy the tag.  Each step is
	 * guarded so a partially constructed tcmd (e.g. from an allocation
	 * failure) can be passed in, provided its unset fields are zeroed.
	 */
	if (tmp->tmp_dcmd_phys_addr)
		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
	if (tmp->tmp_dcmd_mem != NULL)
		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
	if (tmp->tmp_dcmd_tag != NULL)
		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
}
4818 
4819 /*
4820  * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
4821  * Adapter soft state Previously issued cmd to be aborted
4822  *
4823  * This function is used to abort previously issued commands, such as AEN and
4824  * RAID map sync map commands.  The abort command is sent as a DCMD internal
4825  * command and subsequently the driver will wait for a return status.  The
4826  * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
4827  */
4828 static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd_to_abort)4829 mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
4830     struct mrsas_mfi_cmd *cmd_to_abort)
4831 {
4832 	struct mrsas_mfi_cmd *cmd;
4833 	struct mrsas_abort_frame *abort_fr;
4834 	u_int8_t retcode = 0;
4835 	unsigned long total_time = 0;
4836 	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
4837 
4838 	cmd = mrsas_get_mfi_cmd(sc);
4839 	if (!cmd) {
4840 		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
4841 		return (1);
4842 	}
4843 	abort_fr = &cmd->frame->abort;
4844 
4845 	/* Prepare and issue the abort frame */
4846 	abort_fr->cmd = MFI_CMD_ABORT;
4847 	abort_fr->cmd_status = 0xFF;
4848 	abort_fr->flags = 0;
4849 	abort_fr->abort_context = cmd_to_abort->index;
4850 	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
4851 	abort_fr->abort_mfi_phys_addr_hi = 0;
4852 
4853 	cmd->sync_cmd = 1;
4854 	cmd->cmd_status = 0xFF;
4855 
4856 	if (mrsas_issue_dcmd(sc, cmd)) {
4857 		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
4858 		return (1);
4859 	}
4860 	/* Wait for this cmd to complete */
4861 	sc->chan = (void *)&cmd;
4862 	while (1) {
4863 		if (cmd->cmd_status == 0xFF) {
4864 			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
4865 		} else
4866 			break;
4867 		total_time++;
4868 		if (total_time >= max_wait) {
4869 			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
4870 			retcode = 1;
4871 			break;
4872 		}
4873 	}
4874 
4875 	cmd->sync_cmd = 0;
4876 	mrsas_release_mfi_cmd(cmd);
4877 	return (retcode);
4878 }
4879 
4880 /*
4881  * mrsas_complete_abort:      Completes aborting a command input:
4882  * Adapter soft state Cmd that was issued to abort another cmd
4883  *
4884  * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
4885  * change after sending the command.  This function is called from
4886  * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
4887  */
4888 void
mrsas_complete_abort(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)4889 mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4890 {
4891 	if (cmd->sync_cmd) {
4892 		cmd->sync_cmd = 0;
4893 		cmd->cmd_status = 0;
4894 		sc->chan = (void *)&cmd;
4895 		wakeup_one((void *)&sc->chan);
4896 	}
4897 	return;
4898 }
4899 
4900 /*
4901  * mrsas_aen_handler:	AEN processing callback function from thread context
4902  * input:				Adapter soft state
4903  *
4904  * Asynchronous event handler
4905  */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	/* Skip event processing while a detach or controller reset runs. */
	if (sc->remove_in_progress || sc->reset_in_progress) {
		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
			__func__, __LINE__);
		return;
	}
	if (sc->evt_detail_mem) {
		/*
		 * NOTE(review): evt_detail_mem->code (and seq_num below)
		 * are read without le32toh while other firmware fields in
		 * this file are byte-swapped — confirm whether they are
		 * converted at completion time before relying on this on
		 * big-endian hosts.
		 */
		switch (sc->evt_detail_mem->code) {
		case MR_EVT_PD_INSERTED:
			/* PD change: refresh the PD list, rescan sim_1. */
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_PD_REMOVED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			/* LD gone: rescan only the logical-drive SIM. */
			mrsas_bus_scan_sim(sc, sc->sim_0);
			break;
		case MR_EVT_LD_CREATED:
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			/* Broad config change: rescan both SIMs below. */
			doscan = 1;
			break;
		case MR_EVT_CTRL_PROP_CHANGED:
			fail_aen = mrsas_get_ctrl_info(sc);
			if (fail_aen)
				goto skip_register_aen;
			break;
		default:
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	/* Full rescan: refresh both lists and both SIMs (PDs then LDs). */
	if (doscan) {
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	/* An AEN is already outstanding; don't register another. */
	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);

skip_register_aen:
	return;

}
5005 
5006 /*
5007  * mrsas_complete_aen:	Completes AEN command
5008  * input:				Adapter soft state
5009  * 						Cmd that was issued to abort another cmd
5010  *
5011  * This function will be called from ISR and will continue event processing from
5012  * thread context by enqueuing task in ev_tq (callback function
5013  * "mrsas_aen_handler").
5014  */
5015 void
mrsas_complete_aen(struct mrsas_softc * sc,struct mrsas_mfi_cmd * cmd)5016 mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
5017 {
5018 	/*
5019 	 * Don't signal app if it is just an aborted previously registered
5020 	 * aen
5021 	 */
5022 	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
5023 		sc->mrsas_aen_triggered = 1;
5024 		mtx_lock(&sc->aen_lock);
5025 		if (sc->mrsas_poll_waiting) {
5026 			sc->mrsas_poll_waiting = 0;
5027 			selwakeup(&sc->mrsas_select);
5028 		}
5029 		mtx_unlock(&sc->aen_lock);
5030 	} else
5031 		cmd->abort_aen = 0;
5032 
5033 	sc->aen_cmd = NULL;
5034 	mrsas_release_mfi_cmd(cmd);
5035 
5036 	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
5037 
5038 	return;
5039 }
5040 
/* Newbus method table: device-framework entry points for this driver. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_shutdown, mrsas_shutdown),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}			/* table terminator */
};

/* Driver description: name, method table, per-instance softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

/* Register on the PCI bus; CAM provides the SCSI transport layer. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
5061