xref: /linux/drivers/scsi/mpi3mr/mpi3mr_fw.c (revision 2c8c9aae4492f813b9b9ae95f0931945a693100e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2023 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12 
/* Forward declarations for routines defined later in this file */
static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd);
static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
/* Number of io_uring poll-mode queues requested by the user (0 = disabled) */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
24 
#if defined(writeq) && defined(CONFIG_64BIT)
/*
 * mpi3mr_writeq - 64-bit MMIO register write.
 * @b: 64-bit value to write
 * @addr: mapped register address
 * @write_queue_lock: unused here; the native writeq is already atomic
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	writeq(b, addr);
}
#else
/*
 * No native 64-bit MMIO write available: emulate it with two 32-bit
 * writes (low dword first, then high dword at addr + 4) under
 * @write_queue_lock so the pair is not interleaved with another writer.
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	__u64 data_out = b;
	unsigned long flags;

	spin_lock_irqsave(write_queue_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(write_queue_lock, flags);
}
#endif
44 
45 static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo * op_req_q)46 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
47 {
48 	u16 pi, ci, max_entries;
49 	bool is_qfull = false;
50 
51 	pi = op_req_q->pi;
52 	ci = READ_ONCE(op_req_q->ci);
53 	max_entries = op_req_q->num_requests;
54 
55 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
56 		is_qfull = true;
57 
58 	return is_qfull;
59 }
60 
mpi3mr_sync_irqs(struct mpi3mr_ioc * mrioc)61 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
62 {
63 	u16 i, max_vectors;
64 
65 	max_vectors = mrioc->intr_info_count;
66 
67 	for (i = 0; i < max_vectors; i++)
68 		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
69 }
70 
mpi3mr_ioc_disable_intr(struct mpi3mr_ioc * mrioc)71 void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
72 {
73 	mrioc->intr_enabled = 0;
74 	mpi3mr_sync_irqs(mrioc);
75 }
76 
mpi3mr_ioc_enable_intr(struct mpi3mr_ioc * mrioc)77 void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
78 {
79 	mrioc->intr_enabled = 1;
80 }
81 
mpi3mr_cleanup_isr(struct mpi3mr_ioc * mrioc)82 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
83 {
84 	u16 i;
85 
86 	mpi3mr_ioc_disable_intr(mrioc);
87 
88 	if (!mrioc->intr_info)
89 		return;
90 
91 	for (i = 0; i < mrioc->intr_info_count; i++)
92 		free_irq(pci_irq_vector(mrioc->pdev, i),
93 		    (mrioc->intr_info + i));
94 
95 	kfree(mrioc->intr_info);
96 	mrioc->intr_info = NULL;
97 	mrioc->intr_info_count = 0;
98 	mrioc->is_intr_info_set = false;
99 	pci_free_irq_vectors(mrioc->pdev);
100 }
101 
mpi3mr_add_sg_single(void * paddr,u8 flags,u32 length,dma_addr_t dma_addr)102 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
103 	dma_addr_t dma_addr)
104 {
105 	struct mpi3_sge_common *sgel = paddr;
106 
107 	sgel->flags = flags;
108 	sgel->length = cpu_to_le32(length);
109 	sgel->address = cpu_to_le64(dma_addr);
110 }
111 
mpi3mr_build_zero_len_sge(void * paddr)112 void mpi3mr_build_zero_len_sge(void *paddr)
113 {
114 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
115 
116 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
117 }
118 
mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc * mrioc,dma_addr_t phys_addr)119 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
120 	dma_addr_t phys_addr)
121 {
122 	if (!phys_addr)
123 		return NULL;
124 
125 	if ((phys_addr < mrioc->reply_buf_dma) ||
126 	    (phys_addr > mrioc->reply_buf_dma_max_address))
127 		return NULL;
128 
129 	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
130 }
131 
mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc * mrioc,dma_addr_t phys_addr)132 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
133 	dma_addr_t phys_addr)
134 {
135 	if (!phys_addr)
136 		return NULL;
137 
138 	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
139 }
140 
mpi3mr_repost_reply_buf(struct mpi3mr_ioc * mrioc,u64 reply_dma)141 static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
142 	u64 reply_dma)
143 {
144 	u32 old_idx = 0;
145 	unsigned long flags;
146 
147 	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
148 	old_idx  =  mrioc->reply_free_queue_host_index;
149 	mrioc->reply_free_queue_host_index = (
150 	    (mrioc->reply_free_queue_host_index ==
151 	    (mrioc->reply_free_qsz - 1)) ? 0 :
152 	    (mrioc->reply_free_queue_host_index + 1));
153 	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
154 	writel(mrioc->reply_free_queue_host_index,
155 	    &mrioc->sysif_regs->reply_free_host_index);
156 	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
157 }
158 
mpi3mr_repost_sense_buf(struct mpi3mr_ioc * mrioc,u64 sense_buf_dma)159 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
160 	u64 sense_buf_dma)
161 {
162 	u32 old_idx = 0;
163 	unsigned long flags;
164 
165 	spin_lock_irqsave(&mrioc->sbq_lock, flags);
166 	old_idx  =  mrioc->sbq_host_index;
167 	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
168 	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
169 	    (mrioc->sbq_host_index + 1));
170 	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
171 	writel(mrioc->sbq_host_index,
172 	    &mrioc->sysif_regs->sense_buffer_free_host_index);
173 	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
174 }
175 
mpi3mr_print_event_data(struct mpi3mr_ioc * mrioc,struct mpi3_event_notification_reply * event_reply)176 static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
177 	struct mpi3_event_notification_reply *event_reply)
178 {
179 	char *desc = NULL;
180 	u16 event;
181 
182 	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
183 		return;
184 
185 	event = event_reply->event;
186 
187 	switch (event) {
188 	case MPI3_EVENT_LOG_DATA:
189 		desc = "Log Data";
190 		break;
191 	case MPI3_EVENT_CHANGE:
192 		desc = "Event Change";
193 		break;
194 	case MPI3_EVENT_GPIO_INTERRUPT:
195 		desc = "GPIO Interrupt";
196 		break;
197 	case MPI3_EVENT_CABLE_MGMT:
198 		desc = "Cable Management";
199 		break;
200 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
201 		desc = "Energy Pack Change";
202 		break;
203 	case MPI3_EVENT_DEVICE_ADDED:
204 	{
205 		struct mpi3_device_page0 *event_data =
206 		    (struct mpi3_device_page0 *)event_reply->event_data;
207 		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
208 		    event_data->dev_handle, event_data->device_form);
209 		return;
210 	}
211 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
212 	{
213 		struct mpi3_device_page0 *event_data =
214 		    (struct mpi3_device_page0 *)event_reply->event_data;
215 		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
216 		    event_data->dev_handle, event_data->device_form);
217 		return;
218 	}
219 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
220 	{
221 		struct mpi3_event_data_device_status_change *event_data =
222 		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
223 		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
224 		    event_data->dev_handle, event_data->reason_code);
225 		return;
226 	}
227 	case MPI3_EVENT_SAS_DISCOVERY:
228 	{
229 		struct mpi3_event_data_sas_discovery *event_data =
230 		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
231 		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
232 		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
233 		    "start" : "stop",
234 		    le32_to_cpu(event_data->discovery_status));
235 		return;
236 	}
237 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
238 		desc = "SAS Broadcast Primitive";
239 		break;
240 	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
241 		desc = "SAS Notify Primitive";
242 		break;
243 	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
244 		desc = "SAS Init Device Status Change";
245 		break;
246 	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
247 		desc = "SAS Init Table Overflow";
248 		break;
249 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
250 		desc = "SAS Topology Change List";
251 		break;
252 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
253 		desc = "Enclosure Device Status Change";
254 		break;
255 	case MPI3_EVENT_ENCL_DEVICE_ADDED:
256 		desc = "Enclosure Added";
257 		break;
258 	case MPI3_EVENT_HARD_RESET_RECEIVED:
259 		desc = "Hard Reset Received";
260 		break;
261 	case MPI3_EVENT_SAS_PHY_COUNTER:
262 		desc = "SAS PHY Counter";
263 		break;
264 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
265 		desc = "SAS Device Discovery Error";
266 		break;
267 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
268 		desc = "PCIE Topology Change List";
269 		break;
270 	case MPI3_EVENT_PCIE_ENUMERATION:
271 	{
272 		struct mpi3_event_data_pcie_enumeration *event_data =
273 		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
274 		ioc_info(mrioc, "PCIE Enumeration: (%s)",
275 		    (event_data->reason_code ==
276 		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
277 		if (event_data->enumeration_status)
278 			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
279 			    le32_to_cpu(event_data->enumeration_status));
280 		return;
281 	}
282 	case MPI3_EVENT_PREPARE_FOR_RESET:
283 		desc = "Prepare For Reset";
284 		break;
285 	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
286 		desc = "Diagnostic Buffer Status Change";
287 		break;
288 	}
289 
290 	if (!desc)
291 		return;
292 
293 	ioc_info(mrioc, "%s\n", desc);
294 }
295 
mpi3mr_handle_events(struct mpi3mr_ioc * mrioc,struct mpi3_default_reply * def_reply)296 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
297 	struct mpi3_default_reply *def_reply)
298 {
299 	struct mpi3_event_notification_reply *event_reply =
300 	    (struct mpi3_event_notification_reply *)def_reply;
301 
302 	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
303 	mpi3mr_print_event_data(mrioc, event_reply);
304 	mpi3mr_os_handle_events(mrioc, event_reply);
305 }
306 
/*
 * mpi3mr_get_drv_cmd - map a host tag to its driver command tracker.
 * @mrioc: adapter instance reference
 * @host_tag: host tag from the reply descriptor/frame
 * @def_reply: reply frame, if one accompanies the descriptor (may be NULL)
 *
 * Fixed tags map to their dedicated trackers; the DEVRMCMD and
 * EVTACKCMD tag ranges index into per-command arrays. The INVALID tag
 * marks unsolicited replies: event notifications are dispatched here
 * as a side effect, and NULL is returned.
 *
 * Return: the matching driver command tracker, or NULL if the tag does
 * not correspond to one.
 */
static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
	struct mpi3_default_reply *def_reply)
{
	u16 idx;

	switch (host_tag) {
	case MPI3MR_HOSTTAG_INITCMDS:
		return &mrioc->init_cmds;
	case MPI3MR_HOSTTAG_CFG_CMDS:
		return &mrioc->cfg_cmds;
	case MPI3MR_HOSTTAG_BSG_CMDS:
		return &mrioc->bsg_cmds;
	case MPI3MR_HOSTTAG_BLK_TMS:
		return &mrioc->host_tm_cmds;
	case MPI3MR_HOSTTAG_PEL_ABORT:
		return &mrioc->pel_abort_cmd;
	case MPI3MR_HOSTTAG_PEL_WAIT:
		return &mrioc->pel_cmds;
	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
		return &mrioc->transport_cmds;
	case MPI3MR_HOSTTAG_INVALID:
		/* Unsolicited reply: process event notifications inline */
		if (def_reply && def_reply->function ==
		    MPI3_FUNCTION_EVENT_NOTIFICATION)
			mpi3mr_handle_events(mrioc, def_reply);
		return NULL;
	default:
		break;
	}
	/* Device removal handshake command tags */
	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
		return &mrioc->dev_rmhs_cmds[idx];
	}

	/* Event acknowledgment command tags */
	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
		return &mrioc->evtack_cmds[idx];
	}

	return NULL;
}
350 
mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc * mrioc,struct mpi3_default_reply_descriptor * reply_desc,u64 * reply_dma)351 static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
352 	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
353 {
354 	u16 reply_desc_type, host_tag = 0;
355 	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
356 	u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
357 	u32 ioc_loginfo = 0, sense_count = 0;
358 	struct mpi3_status_reply_descriptor *status_desc;
359 	struct mpi3_address_reply_descriptor *addr_desc;
360 	struct mpi3_success_reply_descriptor *success_desc;
361 	struct mpi3_default_reply *def_reply = NULL;
362 	struct mpi3mr_drv_cmd *cmdptr = NULL;
363 	struct mpi3_scsi_io_reply *scsi_reply;
364 	struct scsi_sense_hdr sshdr;
365 	u8 *sense_buf = NULL;
366 
367 	*reply_dma = 0;
368 	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
369 	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
370 	switch (reply_desc_type) {
371 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
372 		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
373 		host_tag = le16_to_cpu(status_desc->host_tag);
374 		ioc_status = le16_to_cpu(status_desc->ioc_status);
375 		if (ioc_status &
376 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
377 			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
378 		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
379 		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
380 		break;
381 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
382 		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
383 		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
384 		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
385 		if (!def_reply)
386 			goto out;
387 		host_tag = le16_to_cpu(def_reply->host_tag);
388 		ioc_status = le16_to_cpu(def_reply->ioc_status);
389 		if (ioc_status &
390 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
391 			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
392 		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
393 		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
394 			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
395 			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
396 			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
397 			sense_count = le32_to_cpu(scsi_reply->sense_count);
398 			if (sense_buf) {
399 				scsi_normalize_sense(sense_buf, sense_count,
400 				    &sshdr);
401 				mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
402 				    sshdr.asc, sshdr.ascq);
403 			}
404 		}
405 		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
406 		break;
407 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
408 		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
409 		host_tag = le16_to_cpu(success_desc->host_tag);
410 		break;
411 	default:
412 		break;
413 	}
414 
415 	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
416 	if (cmdptr) {
417 		if (cmdptr->state & MPI3MR_CMD_PENDING) {
418 			cmdptr->state |= MPI3MR_CMD_COMPLETE;
419 			cmdptr->ioc_loginfo = ioc_loginfo;
420 			if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
421 				cmdptr->ioc_status = ioc_status;
422 			else
423 				cmdptr->ioc_status = masked_ioc_status;
424 			cmdptr->state &= ~MPI3MR_CMD_PENDING;
425 			if (def_reply) {
426 				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
427 				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
428 				    mrioc->reply_sz);
429 			}
430 			if (sense_buf && cmdptr->sensebuf) {
431 				cmdptr->is_sense = 1;
432 				memcpy(cmdptr->sensebuf, sense_buf,
433 				       MPI3MR_SENSE_BUF_SZ);
434 			}
435 			if (cmdptr->is_waiting) {
436 				cmdptr->is_waiting = 0;
437 				complete(&cmdptr->done);
438 			} else if (cmdptr->callback)
439 				cmdptr->callback(mrioc, cmdptr);
440 		}
441 	}
442 out:
443 	if (sense_buf)
444 		mpi3mr_repost_sense_buf(mrioc,
445 		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
446 }
447 
mpi3mr_process_admin_reply_q(struct mpi3mr_ioc * mrioc)448 int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
449 {
450 	u32 exp_phase = mrioc->admin_reply_ephase;
451 	u32 admin_reply_ci = mrioc->admin_reply_ci;
452 	u32 num_admin_replies = 0;
453 	u64 reply_dma = 0;
454 	u16 threshold_comps = 0;
455 	struct mpi3_default_reply_descriptor *reply_desc;
456 
457 	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
458 		atomic_inc(&mrioc->admin_pend_isr);
459 		return 0;
460 	}
461 
462 	atomic_set(&mrioc->admin_pend_isr, 0);
463 	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
464 	    admin_reply_ci;
465 
466 	if ((le16_to_cpu(reply_desc->reply_flags) &
467 	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
468 		atomic_dec(&mrioc->admin_reply_q_in_use);
469 		return 0;
470 	}
471 
472 	do {
473 		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
474 			break;
475 
476 		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
477 		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
478 		if (reply_dma)
479 			mpi3mr_repost_reply_buf(mrioc, reply_dma);
480 		num_admin_replies++;
481 		threshold_comps++;
482 		if (++admin_reply_ci == mrioc->num_admin_replies) {
483 			admin_reply_ci = 0;
484 			exp_phase ^= 1;
485 		}
486 		reply_desc =
487 		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
488 		    admin_reply_ci;
489 		if ((le16_to_cpu(reply_desc->reply_flags) &
490 		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
491 			break;
492 		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
493 			writel(admin_reply_ci,
494 			    &mrioc->sysif_regs->admin_reply_queue_ci);
495 			threshold_comps = 0;
496 		}
497 	} while (1);
498 
499 	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
500 	mrioc->admin_reply_ci = admin_reply_ci;
501 	mrioc->admin_reply_ephase = exp_phase;
502 	atomic_dec(&mrioc->admin_reply_q_in_use);
503 
504 	return num_admin_replies;
505 }
506 
507 /**
508  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
509  *	queue's consumer index from operational reply descriptor queue.
510  * @op_reply_q: op_reply_qinfo object
511  * @reply_ci: operational reply descriptor's queue consumer index
512  *
513  * Returns: reply descriptor frame address
514  */
515 static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo * op_reply_q,u32 reply_ci)516 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
517 {
518 	void *segment_base_addr;
519 	struct segments *segments = op_reply_q->q_segments;
520 	struct mpi3_default_reply_descriptor *reply_desc = NULL;
521 
522 	segment_base_addr =
523 	    segments[reply_ci / op_reply_q->segment_qd].segment;
524 	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
525 	    (reply_ci % op_reply_q->segment_qd);
526 	return reply_desc;
527 }
528 
529 /**
530  * mpi3mr_process_op_reply_q - Operational reply queue handler
531  * @mrioc: Adapter instance reference
532  * @op_reply_q: Operational reply queue info
533  *
534  * Checks the specific operational reply queue and drains the
535  * reply queue entries until the queue is empty and process the
536  * individual reply descriptors.
537  *
538  * Return: 0 if queue is already processed,or number of reply
539  *	    descriptors processed.
540  */
mpi3mr_process_op_reply_q(struct mpi3mr_ioc * mrioc,struct op_reply_qinfo * op_reply_q)541 int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
542 	struct op_reply_qinfo *op_reply_q)
543 {
544 	struct op_req_qinfo *op_req_q;
545 	u32 exp_phase;
546 	u32 reply_ci;
547 	u32 num_op_reply = 0;
548 	u64 reply_dma = 0;
549 	struct mpi3_default_reply_descriptor *reply_desc;
550 	u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;
551 
552 	reply_qidx = op_reply_q->qid - 1;
553 
554 	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
555 		return 0;
556 
557 	exp_phase = op_reply_q->ephase;
558 	reply_ci = op_reply_q->ci;
559 
560 	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
561 	if ((le16_to_cpu(reply_desc->reply_flags) &
562 	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
563 		atomic_dec(&op_reply_q->in_use);
564 		return 0;
565 	}
566 
567 	do {
568 		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
569 			break;
570 
571 		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
572 		op_req_q = &mrioc->req_qinfo[req_q_idx];
573 
574 		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
575 		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
576 		    reply_qidx);
577 
578 		if (reply_dma)
579 			mpi3mr_repost_reply_buf(mrioc, reply_dma);
580 		num_op_reply++;
581 		threshold_comps++;
582 
583 		if (++reply_ci == op_reply_q->num_replies) {
584 			reply_ci = 0;
585 			exp_phase ^= 1;
586 		}
587 
588 		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
589 
590 		if ((le16_to_cpu(reply_desc->reply_flags) &
591 		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
592 			break;
593 #ifndef CONFIG_PREEMPT_RT
594 		/*
595 		 * Exit completion loop to avoid CPU lockup
596 		 * Ensure remaining completion happens from threaded ISR.
597 		 */
598 		if (num_op_reply > mrioc->max_host_ios) {
599 			op_reply_q->enable_irq_poll = true;
600 			break;
601 		}
602 #endif
603 		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
604 			writel(reply_ci,
605 			    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
606 			atomic_sub(threshold_comps, &op_reply_q->pend_ios);
607 			threshold_comps = 0;
608 		}
609 	} while (1);
610 
611 	writel(reply_ci,
612 	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
613 	op_reply_q->ci = reply_ci;
614 	op_reply_q->ephase = exp_phase;
615 	atomic_sub(threshold_comps, &op_reply_q->pend_ios);
616 	atomic_dec(&op_reply_q->in_use);
617 	return num_op_reply;
618 }
619 
620 /**
621  * mpi3mr_blk_mq_poll - Operational reply queue handler
622  * @shost: SCSI Host reference
623  * @queue_num: Request queue number (w.r.t OS it is hardware context number)
624  *
625  * Checks the specific operational reply queue and drains the
626  * reply queue entries until the queue is empty and process the
627  * individual reply descriptors.
628  *
629  * Return: 0 if queue is already processed,or number of reply
630  *	    descriptors processed.
631  */
mpi3mr_blk_mq_poll(struct Scsi_Host * shost,unsigned int queue_num)632 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
633 {
634 	int num_entries = 0;
635 	struct mpi3mr_ioc *mrioc;
636 
637 	mrioc = (struct mpi3mr_ioc *)shost->hostdata;
638 
639 	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
640 	    mrioc->unrecoverable || mrioc->pci_err_recovery))
641 		return 0;
642 
643 	num_entries = mpi3mr_process_op_reply_q(mrioc,
644 			&mrioc->op_reply_qinfo[queue_num]);
645 
646 	return num_entries;
647 }
648 
mpi3mr_isr_primary(int irq,void * privdata)649 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
650 {
651 	struct mpi3mr_intr_info *intr_info = privdata;
652 	struct mpi3mr_ioc *mrioc;
653 	u16 midx;
654 	u32 num_admin_replies = 0, num_op_reply = 0;
655 
656 	if (!intr_info)
657 		return IRQ_NONE;
658 
659 	mrioc = intr_info->mrioc;
660 
661 	if (!mrioc->intr_enabled)
662 		return IRQ_NONE;
663 
664 	midx = intr_info->msix_index;
665 
666 	if (!midx)
667 		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
668 	if (intr_info->op_reply_q)
669 		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
670 		    intr_info->op_reply_q);
671 
672 	if (num_admin_replies || num_op_reply)
673 		return IRQ_HANDLED;
674 	else
675 		return IRQ_NONE;
676 }
677 
678 #ifndef CONFIG_PREEMPT_RT
679 
mpi3mr_isr(int irq,void * privdata)680 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
681 {
682 	struct mpi3mr_intr_info *intr_info = privdata;
683 	int ret;
684 
685 	if (!intr_info)
686 		return IRQ_NONE;
687 
688 	/* Call primary ISR routine */
689 	ret = mpi3mr_isr_primary(irq, privdata);
690 
691 	/*
692 	 * If more IOs are expected, schedule IRQ polling thread.
693 	 * Otherwise exit from ISR.
694 	 */
695 	if (!intr_info->op_reply_q)
696 		return ret;
697 
698 	if (!intr_info->op_reply_q->enable_irq_poll ||
699 	    !atomic_read(&intr_info->op_reply_q->pend_ios))
700 		return ret;
701 
702 	disable_irq_nosync(intr_info->os_irq);
703 
704 	return IRQ_WAKE_THREAD;
705 }
706 
707 /**
708  * mpi3mr_isr_poll - Reply queue polling routine
709  * @irq: IRQ
710  * @privdata: Interrupt info
711  *
712  * poll for pending I/O completions in a loop until pending I/Os
713  * present or controller queue depth I/Os are processed.
714  *
715  * Return: IRQ_NONE or IRQ_HANDLED
716  */
mpi3mr_isr_poll(int irq,void * privdata)717 static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
718 {
719 	struct mpi3mr_intr_info *intr_info = privdata;
720 	struct mpi3mr_ioc *mrioc;
721 	u16 midx;
722 	u32 num_op_reply = 0;
723 
724 	if (!intr_info || !intr_info->op_reply_q)
725 		return IRQ_NONE;
726 
727 	mrioc = intr_info->mrioc;
728 	midx = intr_info->msix_index;
729 
730 	/* Poll for pending IOs completions */
731 	do {
732 		if (!mrioc->intr_enabled || mrioc->unrecoverable)
733 			break;
734 
735 		if (!midx)
736 			mpi3mr_process_admin_reply_q(mrioc);
737 		if (intr_info->op_reply_q)
738 			num_op_reply +=
739 			    mpi3mr_process_op_reply_q(mrioc,
740 				intr_info->op_reply_q);
741 
742 		usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);
743 
744 	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
745 	    (num_op_reply < mrioc->max_host_ios));
746 
747 	intr_info->op_reply_q->enable_irq_poll = false;
748 	enable_irq(intr_info->os_irq);
749 
750 	return IRQ_HANDLED;
751 }
752 
753 #endif
754 
755 /**
756  * mpi3mr_request_irq - Request IRQ and register ISR
757  * @mrioc: Adapter instance reference
758  * @index: IRQ vector index
759  *
760  * Request threaded ISR with primary ISR and secondary
761  *
762  * Return: 0 on success and non zero on failures.
763  */
mpi3mr_request_irq(struct mpi3mr_ioc * mrioc,u16 index)764 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
765 {
766 	struct pci_dev *pdev = mrioc->pdev;
767 	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
768 	int retval = 0;
769 
770 	intr_info->mrioc = mrioc;
771 	intr_info->msix_index = index;
772 	intr_info->op_reply_q = NULL;
773 
774 	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
775 	    mrioc->driver_name, mrioc->id, index);
776 
777 #ifndef CONFIG_PREEMPT_RT
778 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
779 	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
780 #else
781 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
782 	    NULL, IRQF_SHARED, intr_info->name, intr_info);
783 #endif
784 	if (retval) {
785 		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
786 		    intr_info->name, pci_irq_vector(pdev, index));
787 		return retval;
788 	}
789 
790 	intr_info->os_irq = pci_irq_vector(pdev, index);
791 	return retval;
792 }
793 
mpi3mr_calc_poll_queues(struct mpi3mr_ioc * mrioc,u16 max_vectors)794 static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
795 {
796 	if (!mrioc->requested_poll_qcount)
797 		return;
798 
799 	/* Reserved for Admin and Default Queue */
800 	if (max_vectors > 2 &&
801 		(mrioc->requested_poll_qcount < max_vectors - 2)) {
802 		ioc_info(mrioc,
803 		    "enabled polled queues (%d) msix (%d)\n",
804 		    mrioc->requested_poll_qcount, max_vectors);
805 	} else {
806 		ioc_info(mrioc,
807 		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
808 		    mrioc->requested_poll_qcount, max_vectors);
809 		mrioc->requested_poll_qcount = 0;
810 	}
811 }
812 
813 /**
814  * mpi3mr_setup_isr - Setup ISR for the controller
815  * @mrioc: Adapter instance reference
816  * @setup_one: Request one IRQ or more
817  *
818  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
819  *
820  * Return: 0 on success and non zero on failures.
821  */
mpi3mr_setup_isr(struct mpi3mr_ioc * mrioc,u8 setup_one)822 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
823 {
824 	unsigned int irq_flags = PCI_IRQ_MSIX;
825 	int max_vectors, min_vec;
826 	int retval;
827 	int i;
828 	struct irq_affinity desc = { .pre_vectors =  1, .post_vectors = 1 };
829 
830 	if (mrioc->is_intr_info_set)
831 		return 0;
832 
833 	mpi3mr_cleanup_isr(mrioc);
834 
835 	if (setup_one || reset_devices) {
836 		max_vectors = 1;
837 		retval = pci_alloc_irq_vectors(mrioc->pdev,
838 		    1, max_vectors, irq_flags);
839 		if (retval < 0) {
840 			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
841 			    retval);
842 			goto out_failed;
843 		}
844 	} else {
845 		max_vectors =
846 		    min_t(int, mrioc->cpu_count + 1 +
847 			mrioc->requested_poll_qcount, mrioc->msix_count);
848 
849 		mpi3mr_calc_poll_queues(mrioc, max_vectors);
850 
851 		ioc_info(mrioc,
852 		    "MSI-X vectors supported: %d, no of cores: %d,",
853 		    mrioc->msix_count, mrioc->cpu_count);
854 		ioc_info(mrioc,
855 		    "MSI-x vectors requested: %d poll_queues %d\n",
856 		    max_vectors, mrioc->requested_poll_qcount);
857 
858 		desc.post_vectors = mrioc->requested_poll_qcount;
859 		min_vec = desc.pre_vectors + desc.post_vectors;
860 		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
861 
862 		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
863 			min_vec, max_vectors, irq_flags, &desc);
864 
865 		if (retval < 0) {
866 			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
867 			    retval);
868 			goto out_failed;
869 		}
870 
871 
872 		/*
873 		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
874 		 * between Admin queue and operational queue
875 		 */
876 		if (retval == min_vec)
877 			mrioc->op_reply_q_offset = 0;
878 		else if (retval != (max_vectors)) {
879 			ioc_info(mrioc,
880 			    "allocated vectors (%d) are less than configured (%d)\n",
881 			    retval, max_vectors);
882 		}
883 
884 		max_vectors = retval;
885 		mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
886 
887 		mpi3mr_calc_poll_queues(mrioc, max_vectors);
888 
889 	}
890 
891 	mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
892 	    GFP_KERNEL);
893 	if (!mrioc->intr_info) {
894 		retval = -ENOMEM;
895 		pci_free_irq_vectors(mrioc->pdev);
896 		goto out_failed;
897 	}
898 	for (i = 0; i < max_vectors; i++) {
899 		retval = mpi3mr_request_irq(mrioc, i);
900 		if (retval) {
901 			mrioc->intr_info_count = i;
902 			goto out_failed;
903 		}
904 	}
905 	if (reset_devices || !setup_one)
906 		mrioc->is_intr_info_set = true;
907 	mrioc->intr_info_count = max_vectors;
908 	mpi3mr_ioc_enable_intr(mrioc);
909 	return 0;
910 
911 out_failed:
912 	mpi3mr_cleanup_isr(mrioc);
913 
914 	return retval;
915 }
916 
/* IOC state to human-readable name mapper table */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
928 
mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)929 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
930 {
931 	int i;
932 	char *name = NULL;
933 
934 	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
935 		if (mrioc_states[i].value == mrioc_state) {
936 			name = mrioc_states[i].name;
937 			break;
938 		}
939 	}
940 	return name;
941 }
942 
943 /* Reset reason to name mapper structure*/
944 static const struct {
945 	enum mpi3mr_reset_reason value;
946 	char *name;
947 } mpi3mr_reset_reason_codes[] = {
948 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
949 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
950 	{ MPI3MR_RESET_FROM_APP, "application invocation" },
951 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
952 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
953 	{ MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
954 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
955 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
956 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
957 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
958 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
959 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
960 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
961 	{
962 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
963 		"create request queue timeout"
964 	},
965 	{
966 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
967 		"create reply queue timeout"
968 	},
969 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
970 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
971 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
972 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
973 	{
974 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
975 		"component image activation timeout"
976 	},
977 	{
978 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
979 		"get package version timeout"
980 	},
981 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
982 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
983 	{
984 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
985 		"diagnostic buffer post timeout"
986 	},
987 	{
988 		MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT,
989 		"diagnostic buffer release timeout"
990 	},
991 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
992 	{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
993 	{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
994 };
995 
996 /**
997  * mpi3mr_reset_rc_name - get reset reason code name
998  * @reason_code: reset reason code value
999  *
1000  * Map reset reason to an NULL terminated ASCII string
1001  *
1002  * Return: name corresponding to reset reason value or NULL.
1003  */
mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)1004 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1005 {
1006 	int i;
1007 	char *name = NULL;
1008 
1009 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
1010 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1011 			name = mpi3mr_reset_reason_codes[i].name;
1012 			break;
1013 		}
1014 	}
1015 	return name;
1016 }
1017 
/* Reset type to name mapper structure */
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
1026 
1027 /**
1028  * mpi3mr_reset_type_name - get reset type name
1029  * @reset_type: reset type value
1030  *
1031  * Map reset type to an NULL terminated ASCII string
1032  *
1033  * Return: name corresponding to reset type value or NULL.
1034  */
mpi3mr_reset_type_name(u16 reset_type)1035 static const char *mpi3mr_reset_type_name(u16 reset_type)
1036 {
1037 	int i;
1038 	char *name = NULL;
1039 
1040 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
1041 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1042 			name = mpi3mr_reset_types[i].name;
1043 			break;
1044 		}
1045 	}
1046 	return name;
1047 }
1048 
1049 /**
1050  * mpi3mr_is_fault_recoverable - Read fault code and decide
1051  * whether the controller can be recoverable
1052  * @mrioc: Adapter instance reference
1053  * Return: true if fault is recoverable, false otherwise.
1054  */
mpi3mr_is_fault_recoverable(struct mpi3mr_ioc * mrioc)1055 static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc)
1056 {
1057 	u32 fault;
1058 
1059 	fault = (readl(&mrioc->sysif_regs->fault) &
1060 		      MPI3_SYSIF_FAULT_CODE_MASK);
1061 
1062 	switch (fault) {
1063 	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
1064 	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
1065 		ioc_warn(mrioc,
1066 		    "controller requires system power cycle, marking controller as unrecoverable\n");
1067 		return false;
1068 	case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER:
1069 		ioc_warn(mrioc,
1070 		    "controller faulted due to insufficient power,\n"
1071 		    " try by connecting it to a different slot\n");
1072 		return false;
1073 	default:
1074 		break;
1075 	}
1076 	return true;
1077 }
1078 
1079 /**
1080  * mpi3mr_print_fault_info - Display fault information
1081  * @mrioc: Adapter instance reference
1082  *
1083  * Display the controller fault information if there is a
1084  * controller fault.
1085  *
1086  * Return: Nothing.
1087  */
mpi3mr_print_fault_info(struct mpi3mr_ioc * mrioc)1088 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1089 {
1090 	u32 ioc_status, code, code1, code2, code3;
1091 
1092 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1093 
1094 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1095 		code = readl(&mrioc->sysif_regs->fault);
1096 		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1097 		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1098 		code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1099 
1100 		ioc_info(mrioc,
1101 		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1102 		    code, code1, code2, code3);
1103 	}
1104 }
1105 
1106 /**
1107  * mpi3mr_get_iocstate - Get IOC State
1108  * @mrioc: Adapter instance reference
1109  *
1110  * Return a proper IOC state enum based on the IOC status and
1111  * IOC configuration and unrcoverable state of the controller.
1112  *
1113  * Return: Current IOC state.
1114  */
mpi3mr_get_iocstate(struct mpi3mr_ioc * mrioc)1115 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1116 {
1117 	u32 ioc_status, ioc_config;
1118 	u8 ready, enabled;
1119 
1120 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1121 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1122 
1123 	if (mrioc->unrecoverable)
1124 		return MRIOC_STATE_UNRECOVERABLE;
1125 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1126 		return MRIOC_STATE_FAULT;
1127 
1128 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1129 	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1130 
1131 	if (ready && enabled)
1132 		return MRIOC_STATE_READY;
1133 	if ((!ready) && (!enabled))
1134 		return MRIOC_STATE_RESET;
1135 	if ((!ready) && (enabled))
1136 		return MRIOC_STATE_BECOMING_READY;
1137 
1138 	return MRIOC_STATE_RESET_REQUESTED;
1139 }
1140 
1141 /**
1142  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
1143  * @mrioc: Adapter instance reference
1144  *
1145  * Free the DMA memory allocated for IOCTL handling purpose.
1146  *
1147  * Return: None
1148  */
mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1149 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1150 {
1151 	struct dma_memory_desc *mem_desc;
1152 	u16 i;
1153 
1154 	if (!mrioc->ioctl_dma_pool)
1155 		return;
1156 
1157 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1158 		mem_desc = &mrioc->ioctl_sge[i];
1159 		if (mem_desc->addr) {
1160 			dma_pool_free(mrioc->ioctl_dma_pool,
1161 				      mem_desc->addr,
1162 				      mem_desc->dma_addr);
1163 			mem_desc->addr = NULL;
1164 		}
1165 	}
1166 	dma_pool_destroy(mrioc->ioctl_dma_pool);
1167 	mrioc->ioctl_dma_pool = NULL;
1168 	mem_desc = &mrioc->ioctl_chain_sge;
1169 
1170 	if (mem_desc->addr) {
1171 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1172 				  mem_desc->addr, mem_desc->dma_addr);
1173 		mem_desc->addr = NULL;
1174 	}
1175 	mem_desc = &mrioc->ioctl_resp_sge;
1176 	if (mem_desc->addr) {
1177 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1178 				  mem_desc->addr, mem_desc->dma_addr);
1179 		mem_desc->addr = NULL;
1180 	}
1181 
1182 	mrioc->ioctl_sges_allocated = false;
1183 }
1184 
1185 /**
1186  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
1187  * @mrioc: Adapter instance reference
1188  *
1189  * This function allocates dmaable memory required to handle the
1190  * application issued MPI3 IOCTL requests.
1191  *
1192  * Return: None
1193  */
mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1194 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1195 
1196 {
1197 	struct dma_memory_desc *mem_desc;
1198 	u16 i;
1199 
1200 	mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
1201 						&mrioc->pdev->dev,
1202 						MPI3MR_IOCTL_SGE_SIZE,
1203 						MPI3MR_PAGE_SIZE_4K, 0);
1204 
1205 	if (!mrioc->ioctl_dma_pool) {
1206 		ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
1207 		goto out_failed;
1208 	}
1209 
1210 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1211 		mem_desc = &mrioc->ioctl_sge[i];
1212 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
1213 		mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
1214 						 GFP_KERNEL,
1215 						 &mem_desc->dma_addr);
1216 		if (!mem_desc->addr)
1217 			goto out_failed;
1218 	}
1219 
1220 	mem_desc = &mrioc->ioctl_chain_sge;
1221 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1222 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1223 					    mem_desc->size,
1224 					    &mem_desc->dma_addr,
1225 					    GFP_KERNEL);
1226 	if (!mem_desc->addr)
1227 		goto out_failed;
1228 
1229 	mem_desc = &mrioc->ioctl_resp_sge;
1230 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1231 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1232 					    mem_desc->size,
1233 					    &mem_desc->dma_addr,
1234 					    GFP_KERNEL);
1235 	if (!mem_desc->addr)
1236 		goto out_failed;
1237 
1238 	mrioc->ioctl_sges_allocated = true;
1239 
1240 	return;
1241 out_failed:
1242 	ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
1243 		 "from the applications, application interface for MPT command is disabled\n");
1244 	mpi3mr_free_ioctl_dma_memory(mrioc);
1245 }
1246 
1247 /**
1248  * mpi3mr_clear_reset_history - clear reset history
1249  * @mrioc: Adapter instance reference
1250  *
1251  * Write the reset history bit in IOC status to clear the bit,
1252  * if it is already set.
1253  *
1254  * Return: Nothing.
1255  */
mpi3mr_clear_reset_history(struct mpi3mr_ioc * mrioc)1256 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1257 {
1258 	u32 ioc_status;
1259 
1260 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1261 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1262 		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1263 }
1264 
/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete. The reason code (combined with the OS type and
 * IOC number) is recorded in scratchpad0 before the reset is
 * triggered by clearing the enable-IOC configuration bit.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status, scratch_pad0;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Encode OS type, IOC number and reason code into scratchpad0 */
	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
			 MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
			(mrioc->facts.ioc_num <<
			 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
	/* Clearing the enable-IOC bit initiates the message unit reset */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll in 100 msec steps for reset-history or a fault */
	timeout = MPI3MR_MUR_TIMEOUT * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			break;
		}
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
			mpi3mr_print_fault_info(mrioc);
			break;
		}
		msleep(100);
	} while (--timeout);

	/* MUR succeeded only if IOC is neither ready, faulted nor enabled */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	      (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	      (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		retval = 0;

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}
1320 
/**
 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
 * during reset/resume
 * @mrioc: Adapter instance reference
 *
 * Compare the freshly read IOCFacts values against the values the
 * driver was initialized with. Shrinking limits (reply size growth,
 * fewer operational queues) are fatal; other mismatches are only
 * logged. The remove-pending bitmap is grown when the controller
 * now reports more device handles.
 *
 * Return: zero if the new IOCFacts parameters value is compatible with
 * older values else return -EPERM
 */
static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
{
	unsigned long *removepend_bitmap;

	/* Reply buffers cannot hold a reply frame larger than before */
	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
		ioc_err(mrioc,
		    "cannot increase reply size from %d to %d\n",
		    mrioc->reply_sz, mrioc->facts.reply_sz);
		return -EPERM;
	}

	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational reply queues from %d to %d\n",
		    mrioc->num_op_reply_q,
		    mrioc->facts.max_op_reply_q);
		return -EPERM;
	}

	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
		ioc_err(mrioc,
		    "cannot reduce number of operational request queues from %d to %d\n",
		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
		return -EPERM;
	}

	/* max_sectors was latched at load time; only warn on a change */
	if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
		ioc_err(mrioc, "Warning: The maximum data transfer length\n"
			    "\tchanged after reset: previous(%d), new(%d),\n"
			    "the driver cannot change this at run time\n",
			    mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);

	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
		ioc_err(mrioc,
		    "critical error: multipath capability is enabled at the\n"
		    "\tcontroller while sas transport support is enabled at the\n"
		    "\tdriver, please reboot the system or reload the driver\n");

	/* Track loss of the segmented trace buffer capability across reset */
	if (mrioc->seg_tb_support) {
		if (!(mrioc->facts.ioc_capabilities &
		     MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
			ioc_err(mrioc,
			    "critical error: previously enabled segmented trace\n"
			    " buffer capability is disabled after reset. Please\n"
			    " update the firmware or reboot the system or\n"
			    " reload the driver to enable trace diag buffer\n");
			mrioc->diag_buffers[0].disabled_after_reset = true;
		} else
			mrioc->diag_buffers[0].disabled_after_reset = false;
	}

	/* Grow the remove-pending bitmap if more device handles exist now */
	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
						  GFP_KERNEL);
		if (!removepend_bitmap) {
			ioc_err(mrioc,
				"failed to increase removepend_bitmap bits from %d to %d\n",
				mrioc->dev_handle_bitmap_bits,
				mrioc->facts.max_devhandle);
			return -EPERM;
		}
		bitmap_free(mrioc->removepend_bitmap);
		mrioc->removepend_bitmap = removepend_bitmap;
		ioc_info(mrioc,
			 "increased bits of dev_handle_bitmap from %d to %d\n",
			 mrioc->dev_handle_bitmap_bits,
			 mrioc->facts.max_devhandle);
		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	}

	return 0;
}
1403 
1404 /**
1405  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1406  * @mrioc: Adapter instance reference
1407  *
1408  * Set Enable IOC bit in IOC configuration register and wait for
1409  * the controller to become ready.
1410  *
1411  * Return: 0 on success, appropriate error on failure.
1412  */
mpi3mr_bring_ioc_ready(struct mpi3mr_ioc * mrioc)1413 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1414 {
1415 	u32 ioc_config, ioc_status, timeout, host_diagnostic;
1416 	int retval = 0;
1417 	enum mpi3mr_iocstate ioc_state;
1418 	u64 base_info;
1419 	u8 retry = 0;
1420 	u64 start_time, elapsed_time_sec;
1421 
1422 retry_bring_ioc_ready:
1423 
1424 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1425 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1426 	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1427 	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1428 	    ioc_status, ioc_config, base_info);
1429 
1430 	if (!mpi3mr_is_fault_recoverable(mrioc)) {
1431 		mrioc->unrecoverable = 1;
1432 		goto out_device_not_present;
1433 	}
1434 
1435 	/*The timeout value is in 2sec unit, changing it to seconds*/
1436 	mrioc->ready_timeout =
1437 	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1438 	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1439 
1440 	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1441 
1442 	ioc_state = mpi3mr_get_iocstate(mrioc);
1443 	ioc_info(mrioc, "controller is in %s state during detection\n",
1444 	    mpi3mr_iocstate_name(ioc_state));
1445 
1446 	timeout = mrioc->ready_timeout * 10;
1447 
1448 	do {
1449 		ioc_state = mpi3mr_get_iocstate(mrioc);
1450 
1451 		if (ioc_state != MRIOC_STATE_BECOMING_READY &&
1452 		    ioc_state != MRIOC_STATE_RESET_REQUESTED)
1453 			break;
1454 
1455 		if (!pci_device_is_present(mrioc->pdev)) {
1456 			mrioc->unrecoverable = 1;
1457 			ioc_err(mrioc, "controller is not present while waiting to reset\n");
1458 			goto out_device_not_present;
1459 		}
1460 
1461 		msleep(100);
1462 	} while (--timeout);
1463 
1464 	if (ioc_state == MRIOC_STATE_READY) {
1465 		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1466 		retval = mpi3mr_issue_and_process_mur(mrioc,
1467 		    MPI3MR_RESET_FROM_BRINGUP);
1468 		ioc_state = mpi3mr_get_iocstate(mrioc);
1469 		if (retval)
1470 			ioc_err(mrioc,
1471 			    "message unit reset failed with error %d current state %s\n",
1472 			    retval, mpi3mr_iocstate_name(ioc_state));
1473 	}
1474 	if (ioc_state != MRIOC_STATE_RESET) {
1475 		if (ioc_state == MRIOC_STATE_FAULT) {
1476 			timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1477 			mpi3mr_print_fault_info(mrioc);
1478 			do {
1479 				host_diagnostic =
1480 					readl(&mrioc->sysif_regs->host_diagnostic);
1481 				if (!(host_diagnostic &
1482 				      MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1483 					break;
1484 				if (!pci_device_is_present(mrioc->pdev)) {
1485 					mrioc->unrecoverable = 1;
1486 					ioc_err(mrioc, "controller is not present at the bringup\n");
1487 					goto out_device_not_present;
1488 				}
1489 				msleep(100);
1490 			} while (--timeout);
1491 		}
1492 		mpi3mr_print_fault_info(mrioc);
1493 		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1494 		retval = mpi3mr_issue_reset(mrioc,
1495 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1496 		    MPI3MR_RESET_FROM_BRINGUP);
1497 		if (retval) {
1498 			ioc_err(mrioc,
1499 			    "soft reset failed with error %d\n", retval);
1500 			goto out_failed;
1501 		}
1502 	}
1503 	ioc_state = mpi3mr_get_iocstate(mrioc);
1504 	if (ioc_state != MRIOC_STATE_RESET) {
1505 		ioc_err(mrioc,
1506 		    "cannot bring controller to reset state, current state: %s\n",
1507 		    mpi3mr_iocstate_name(ioc_state));
1508 		goto out_failed;
1509 	}
1510 	mpi3mr_clear_reset_history(mrioc);
1511 	retval = mpi3mr_setup_admin_qpair(mrioc);
1512 	if (retval) {
1513 		ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1514 		    retval);
1515 		goto out_failed;
1516 	}
1517 
1518 	ioc_info(mrioc, "bringing controller to ready state\n");
1519 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1520 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1521 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1522 
1523 	if (retry == 0)
1524 		start_time = jiffies;
1525 
1526 	timeout = mrioc->ready_timeout * 10;
1527 	do {
1528 		ioc_state = mpi3mr_get_iocstate(mrioc);
1529 		if (ioc_state == MRIOC_STATE_READY) {
1530 			ioc_info(mrioc,
1531 			    "successfully transitioned to %s state\n",
1532 			    mpi3mr_iocstate_name(ioc_state));
1533 			return 0;
1534 		}
1535 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1536 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
1537 		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
1538 			mpi3mr_print_fault_info(mrioc);
1539 			goto out_failed;
1540 		}
1541 		if (!pci_device_is_present(mrioc->pdev)) {
1542 			mrioc->unrecoverable = 1;
1543 			ioc_err(mrioc,
1544 			    "controller is not present at the bringup\n");
1545 			retval = -1;
1546 			goto out_device_not_present;
1547 		}
1548 		msleep(100);
1549 		elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1550 	} while (elapsed_time_sec < mrioc->ready_timeout);
1551 
1552 out_failed:
1553 	elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1554 	if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
1555 		retry++;
1556 
1557 		ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
1558 				" elapsed time =%llu\n", retry, elapsed_time_sec);
1559 
1560 		goto retry_bring_ioc_ready;
1561 	}
1562 	ioc_state = mpi3mr_get_iocstate(mrioc);
1563 	ioc_err(mrioc,
1564 	    "failed to bring to ready state,  current state: %s\n",
1565 	    mpi3mr_iocstate_name(ioc_state));
1566 out_device_not_present:
1567 	return retval;
1568 }
1569 
1570 /**
1571  * mpi3mr_soft_reset_success - Check softreset is success or not
1572  * @ioc_status: IOC status register value
1573  * @ioc_config: IOC config register value
1574  *
1575  * Check whether the soft reset is successful or not based on
1576  * IOC status and IOC config register values.
1577  *
1578  * Return: True when the soft reset is success, false otherwise.
1579  */
1580 static inline bool
mpi3mr_soft_reset_success(u32 ioc_status,u32 ioc_config)1581 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1582 {
1583 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1584 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1585 		return true;
1586 	return false;
1587 }
1588 
1589 /**
1590  * mpi3mr_diagfault_success - Check diag fault is success or not
1591  * @mrioc: Adapter reference
1592  * @ioc_status: IOC status register value
1593  *
1594  * Check whether the controller hit diag reset fault code.
1595  *
1596  * Return: True when there is diag fault, false otherwise.
1597  */
mpi3mr_diagfault_success(struct mpi3mr_ioc * mrioc,u32 ioc_status)1598 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1599 	u32 ioc_status)
1600 {
1601 	u32 fault;
1602 
1603 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1604 		return false;
1605 	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1606 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1607 		mpi3mr_print_fault_info(mrioc);
1608 		return true;
1609 	}
1610 	return false;
1611 }
1612 
1613 /**
1614  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1615  * @mrioc: Adapter reference
1616  *
1617  * Set diag save bit in IOC configuration register to enable
1618  * snapdump.
1619  *
1620  * Return: Nothing.
1621  */
mpi3mr_set_diagsave(struct mpi3mr_ioc * mrioc)1622 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1623 {
1624 	u32 ioc_config;
1625 
1626 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1627 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1628 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1629 }
1630 
1631 /**
1632  * mpi3mr_issue_reset - Issue reset to the controller
1633  * @mrioc: Adapter reference
1634  * @reset_type: Reset type
1635  * @reset_reason: Reset reason code
1636  *
1637  * Unlock the host diagnostic registers and write the specific
1638  * reset type to that, wait for reset acknowledgment from the
1639  * controller, if the reset is not successful retry for the
1640  * predefined number of times.
1641  *
1642  * Return: 0 on success, non-zero on failure.
1643  */
mpi3mr_issue_reset(struct mpi3mr_ioc * mrioc,u16 reset_type,u16 reset_reason)1644 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1645 	u16 reset_reason)
1646 {
1647 	int retval = -1;
1648 	u8 unlock_retry_count = 0;
1649 	u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
1650 	u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1651 
1652 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1653 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1654 		return retval;
1655 	if (mrioc->unrecoverable)
1656 		return retval;
1657 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1658 		retval = 0;
1659 		return retval;
1660 	}
1661 
1662 	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1663 	    mpi3mr_reset_type_name(reset_type),
1664 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
1665 
1666 	mpi3mr_clear_reset_history(mrioc);
1667 	do {
1668 		ioc_info(mrioc,
1669 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
1670 		    ++unlock_retry_count);
1671 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1672 			ioc_err(mrioc,
1673 			    "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1674 			    mpi3mr_reset_type_name(reset_type),
1675 			    host_diagnostic);
1676 			mrioc->unrecoverable = 1;
1677 			return retval;
1678 		}
1679 
1680 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1681 		    &mrioc->sysif_regs->write_sequence);
1682 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1683 		    &mrioc->sysif_regs->write_sequence);
1684 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1685 		    &mrioc->sysif_regs->write_sequence);
1686 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1687 		    &mrioc->sysif_regs->write_sequence);
1688 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1689 		    &mrioc->sysif_regs->write_sequence);
1690 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1691 		    &mrioc->sysif_regs->write_sequence);
1692 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1693 		    &mrioc->sysif_regs->write_sequence);
1694 		usleep_range(1000, 1100);
1695 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1696 		ioc_info(mrioc,
1697 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1698 		    unlock_retry_count, host_diagnostic);
1699 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1700 
1701 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1702 	    MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num <<
1703 	    MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1704 	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1705 	writel(host_diagnostic | reset_type,
1706 	    &mrioc->sysif_regs->host_diagnostic);
1707 	switch (reset_type) {
1708 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1709 		do {
1710 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1711 			ioc_config =
1712 			    readl(&mrioc->sysif_regs->ioc_configuration);
1713 			if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1714 			    && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1715 			    ) {
1716 				mpi3mr_clear_reset_history(mrioc);
1717 				retval = 0;
1718 				break;
1719 			}
1720 			msleep(100);
1721 		} while (--timeout);
1722 		mpi3mr_print_fault_info(mrioc);
1723 		break;
1724 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1725 		do {
1726 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1727 			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1728 				retval = 0;
1729 				break;
1730 			}
1731 			msleep(100);
1732 		} while (--timeout);
1733 		break;
1734 	default:
1735 		break;
1736 	}
1737 
1738 	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1739 	    &mrioc->sysif_regs->write_sequence);
1740 
1741 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1742 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1743 	ioc_info(mrioc,
1744 	    "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
1745 	    (!retval)?"successful":"failed", ioc_status,
1746 	    ioc_config);
1747 	if (retval)
1748 		mrioc->unrecoverable = 1;
1749 	return retval;
1750 }
1751 
1752 /**
1753  * mpi3mr_admin_request_post - Post request to admin queue
1754  * @mrioc: Adapter reference
1755  * @admin_req: MPI3 request
1756  * @admin_req_sz: Request size
1757  * @ignore_reset: Ignore reset in process
1758  *
1759  * Post the MPI3 request into admin request queue and
1760  * inform the controller, if the queue is full return
1761  * appropriate error.
1762  *
1763  * Return: 0 on success, non-zero on failure.
1764  */
mpi3mr_admin_request_post(struct mpi3mr_ioc * mrioc,void * admin_req,u16 admin_req_sz,u8 ignore_reset)1765 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1766 	u16 admin_req_sz, u8 ignore_reset)
1767 {
1768 	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1769 	int retval = 0;
1770 	unsigned long flags;
1771 	u8 *areq_entry;
1772 
1773 	if (mrioc->unrecoverable) {
1774 		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1775 		return -EFAULT;
1776 	}
1777 
1778 	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1779 	areq_pi = mrioc->admin_req_pi;
1780 	areq_ci = mrioc->admin_req_ci;
1781 	max_entries = mrioc->num_admin_req;
1782 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1783 	    (areq_pi == (max_entries - 1)))) {
1784 		ioc_err(mrioc, "AdminReqQ full condition detected\n");
1785 		retval = -EAGAIN;
1786 		goto out;
1787 	}
1788 	if (!ignore_reset && mrioc->reset_in_progress) {
1789 		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1790 		retval = -EAGAIN;
1791 		goto out;
1792 	}
1793 	if (mrioc->pci_err_recovery) {
1794 		ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n");
1795 		retval = -EAGAIN;
1796 		goto out;
1797 	}
1798 
1799 	areq_entry = (u8 *)mrioc->admin_req_base +
1800 	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1801 	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1802 	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1803 
1804 	if (++areq_pi == max_entries)
1805 		areq_pi = 0;
1806 	mrioc->admin_req_pi = areq_pi;
1807 
1808 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1809 
1810 out:
1811 	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1812 
1813 	return retval;
1814 }
1815 
1816 /**
1817  * mpi3mr_free_op_req_q_segments - free request memory segments
1818  * @mrioc: Adapter instance reference
1819  * @q_idx: operational request queue index
1820  *
1821  * Free memory segments allocated for operational request queue
1822  *
1823  * Return: Nothing.
1824  */
static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
{
	struct op_req_qinfo *req_q = &mrioc->req_qinfo[q_idx];
	struct segments *seg = req_q->q_segments;
	int seg_size;
	u16 idx;

	/* Nothing was ever allocated for this queue */
	if (!seg)
		return;

	if (mrioc->enable_segqueue) {
		seg_size = MPI3MR_OP_REQ_Q_SEG_SIZE;
		/* Segmented mode also has a segment list to release */
		if (req_q->q_segment_list) {
			dma_free_coherent(&mrioc->pdev->dev,
			    MPI3MR_MAX_SEG_LIST_SIZE,
			    req_q->q_segment_list,
			    req_q->q_segment_list_dma);
			req_q->q_segment_list = NULL;
		}
	} else {
		/* Non-segmented mode used one contiguous allocation */
		seg_size = req_q->segment_qd * mrioc->facts.op_req_sz;
	}

	/* Free each segment; skip slots a failed allocation left empty */
	for (idx = 0; idx < req_q->num_segments; idx++) {
		if (!seg[idx].segment)
			continue;
		dma_free_coherent(&mrioc->pdev->dev, seg_size,
		    seg[idx].segment, seg[idx].segment_dma);
		seg[idx].segment = NULL;
	}

	kfree(req_q->q_segments);
	req_q->q_segments = NULL;
	/* qid 0 marks the queue as deleted/never created */
	req_q->qid = 0;
}
1859 
1860 /**
1861  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1862  * @mrioc: Adapter instance reference
1863  * @q_idx: operational reply queue index
1864  *
1865  * Free memory segments allocated for operational reply queue
1866  *
1867  * Return: Nothing.
1868  */
static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
{
	struct op_reply_qinfo *reply_q = &mrioc->op_reply_qinfo[q_idx];
	struct segments *seg = reply_q->q_segments;
	int seg_size;
	u16 idx;

	/* Nothing was ever allocated for this queue */
	if (!seg)
		return;

	if (mrioc->enable_segqueue) {
		seg_size = MPI3MR_OP_REP_Q_SEG_SIZE;
		/* Segmented mode also has a segment list to release */
		if (reply_q->q_segment_list) {
			dma_free_coherent(&mrioc->pdev->dev,
			    MPI3MR_MAX_SEG_LIST_SIZE,
			    reply_q->q_segment_list,
			    reply_q->q_segment_list_dma);
			reply_q->q_segment_list = NULL;
		}
	} else {
		/* Non-segmented mode used one contiguous allocation */
		seg_size = reply_q->segment_qd * mrioc->op_reply_desc_sz;
	}

	/* Free each segment; skip slots a failed allocation left empty */
	for (idx = 0; idx < reply_q->num_segments; idx++) {
		if (!seg[idx].segment)
			continue;
		dma_free_coherent(&mrioc->pdev->dev, seg_size,
		    seg[idx].segment, seg[idx].segment_dma);
		seg[idx].segment = NULL;
	}

	kfree(reply_q->q_segments);
	reply_q->q_segments = NULL;
	/* qid 0 marks the queue as deleted/never created */
	reply_q->qid = 0;
}
1904 
1905 /**
1906  * mpi3mr_delete_op_reply_q - delete operational reply queue
1907  * @mrioc: Adapter instance reference
1908  * @qidx: operational reply queue index
1909  *
 * Delete operational reply queue by issuing MPI request
1911  * through admin queue.
1912  *
1913  * Return:  0 on success, non-zero on failure.
1914  */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	/* MSI-x index that services this reply queue */
	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* qid 0 means the queue was never created (or already deleted) */
	if (!reply_qid)	{
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	/*
	 * NOTE(review): the default/poll queue counter is decremented before
	 * the delete request is actually accepted by the firmware; if the
	 * admin post or the command fails below, the counter stays
	 * decremented - confirm this is intended.
	 */
	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
	    mrioc->active_poll_qcount--;

	memset(&delq_req, 0, sizeof(delq_req));
	/* init_cmds is a single-slot command context shared by init paths */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller so recovery can follow */
		ioc_err(mrioc, "delete reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Detach the queue from its interrupt vector, then free its memory */
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
1985 
1986 /**
1987  * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
1988  * @mrioc: Adapter instance reference
1989  * @qidx: request queue index
1990  *
1991  * Allocate segmented memory pools for operational reply
1992  * queue.
1993  *
1994  * Return: 0 on success, non-zero on failure.
1995  */
static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	u64 *seg_list = NULL;
	struct segments *seg;
	int idx, seg_size;

	if (mrioc->enable_segqueue) {
		/* Fixed-size segments, each holding whole reply descriptors */
		op_reply_q->segment_qd =
		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
		seg_size = MPI3MR_OP_REP_Q_SEG_SIZE;

		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_reply_q->q_segment_list)
			return -ENOMEM;
		seg_list = (u64 *)op_reply_q->q_segment_list;
	} else {
		/* One contiguous segment holds the entire queue */
		op_reply_q->segment_qd = op_reply_q->num_replies;
		seg_size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
	}

	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
	    op_reply_q->segment_qd);

	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_reply_q->q_segments)
		return -ENOMEM;

	seg = op_reply_q->q_segments;
	for (idx = 0; idx < op_reply_q->num_segments; idx++) {
		seg[idx].segment = dma_alloc_coherent(&mrioc->pdev->dev,
		    seg_size, &seg[idx].segment_dma, GFP_KERNEL);
		/* Partial allocations are released by the caller on failure */
		if (!seg[idx].segment)
			return -ENOMEM;
		/* Publish the segment's DMA address in the segment list */
		if (mrioc->enable_segqueue)
			seg_list[idx] = (unsigned long)seg[idx].segment_dma;
	}

	return 0;
}
2042 
2043 /**
2044  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
2045  * @mrioc: Adapter instance reference
2046  * @qidx: request queue index
2047  *
2048  * Allocate segmented memory pools for operational request
2049  * queue.
2050  *
2051  * Return: 0 on success, non-zero on failure.
2052  */
static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
	u64 *seg_list = NULL;
	struct segments *seg;
	int idx, seg_size;

	if (mrioc->enable_segqueue) {
		/* Fixed-size segments, each holding whole request frames */
		op_req_q->segment_qd =
		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
		seg_size = MPI3MR_OP_REQ_Q_SEG_SIZE;

		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_req_q->q_segment_list)
			return -ENOMEM;
		seg_list = (u64 *)op_req_q->q_segment_list;
	} else {
		/* One contiguous segment holds the entire queue */
		op_req_q->segment_qd = op_req_q->num_requests;
		seg_size = op_req_q->num_requests * mrioc->facts.op_req_sz;
	}

	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
	    op_req_q->segment_qd);

	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_req_q->q_segments)
		return -ENOMEM;

	seg = op_req_q->q_segments;
	for (idx = 0; idx < op_req_q->num_segments; idx++) {
		seg[idx].segment = dma_alloc_coherent(&mrioc->pdev->dev,
		    seg_size, &seg[idx].segment_dma, GFP_KERNEL);
		/* Partial allocations are released by the caller on failure */
		if (!seg[idx].segment)
			return -ENOMEM;
		/* Publish the segment's DMA address in the segment list */
		if (mrioc->enable_segqueue)
			seg_list[idx] = (unsigned long)seg[idx].segment_dma;
	}

	return 0;
}
2100 
2101 /**
2102  * mpi3mr_create_op_reply_q - create operational reply queue
2103  * @mrioc: Adapter instance reference
2104  * @qidx: operational reply queue index
2105  *
 * Create operational reply queue by issuing MPI request
2107  * through admin queue.
2108  *
2109  * Return:  0 on success, non-zero on failure.
2110  */
static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_create_reply_queue_request create_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	/* MSI-x index that will service this reply queue */
	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* A non-zero qid means the queue already exists */
	if (reply_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
		    reply_qid);

		return retval;
	}

	/* Firmware queue IDs are 1-based */
	reply_qid = qidx + 1;

	/*
	 * Queue depth is device/revision specific: SAS4116 revision 0 parts
	 * use the 4K depth, later revisions the default depth, all other
	 * devices the 2K depth.
	 */
	if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
		if (mrioc->pdev->revision)
			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
		else
			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
	} else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;

	op_reply_q->ci = 0;
	op_reply_q->ephase = 1;
	atomic_set(&op_reply_q->pend_ios, 0);
	atomic_set(&op_reply_q->in_use, 0);
	op_reply_q->enable_irq_poll = false;
	/* Pending-IO level at which request submission starts pushing back */
	op_reply_q->qfull_watermark =
		op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);

	if (!op_reply_q->q_segments) {
		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
		if (retval) {
			/* Release any partially allocated segments */
			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	/* init_cmds is a single-slot command context shared by init paths */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.queue_id = cpu_to_le16(reply_qid);

	/* Vectors past the poll-queue boundary become io_uring poll queues */
	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
	else
		op_reply_q->qtype = MPI3MR_POLL_QUEUE;

	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
		create_req.flags =
			MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
		create_req.msix_index =
			cpu_to_le16(mrioc->intr_info[midx].msix_index);
	} else {
		/*
		 * Polled queues are parked on the last vector; its IRQ is
		 * disabled when the first poll queue is created.
		 */
		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
			reply_qid, midx);
		if (!mrioc->active_poll_qcount)
			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
			    mrioc->intr_info_count - 1));
	}

	if (mrioc->enable_segqueue) {
		/*
		 * NOTE(review): the REQUEST-queue segmented flag macro is
		 * used here for a REPLY queue create; presumably both macros
		 * carry the same bit value - confirm against the MPI3
		 * headers and consider MPI3_CREATE_REPLY_QUEUE_FLAGS_*.
		 */
		create_req.flags |=
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segments[0].segment_dma);

	create_req.size = cpu_to_le16(op_reply_q->num_replies);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller so recovery can follow */
		ioc_err(mrioc, "create reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Mark the queue live only after the firmware accepted it */
	op_reply_q->qid = reply_qid;
	if (midx < mrioc->intr_info_count)
		mrioc->intr_info[midx].op_reply_q = op_reply_q;

	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
	    mrioc->active_poll_qcount++;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2239 
2240 /**
2241  * mpi3mr_create_op_req_q - create operational request queue
2242  * @mrioc: Adapter instance reference
2243  * @idx: operational request queue index
2244  * @reply_qid: Reply queue ID
2245  *
 * Create operational request queue by issuing MPI request
2247  * through admin queue.
2248  *
2249  * Return:  0 on success, non-zero on failure.
2250  */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	/* A non-zero qid means this queue was already created */
	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Firmware queue IDs are 1-based */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	/* Remember the paired reply queue used for completions */
	op_req_q->reply_qid = reply_qid;
	spin_lock_init(&op_req_q->q_lock);

	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			/* Release any partially allocated segments */
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	/* init_cmds is a single-slot command context shared by init paths */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	if (mrioc->enable_segqueue) {
		/* Segmented queue: pass the segment list, not a flat base */
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller so recovery can follow */
		ioc_err(mrioc, "create request queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Mark the queue live only after the firmware accepted it */
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2342 
2343 /**
2344  * mpi3mr_create_op_queues - create operational queue pairs
2345  * @mrioc: Adapter instance reference
2346  *
2347  * Allocate memory for operational queue meta data and call
2348  * create request and reply queue functions.
2349  *
2350  * Return: 0 on success, non-zero on failures.
2351  */
mpi3mr_create_op_queues(struct mpi3mr_ioc * mrioc)2352 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2353 {
2354 	int retval = 0;
2355 	u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2356 
2357 	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2358 	    mrioc->facts.max_op_req_q);
2359 
2360 	msix_count_op_q =
2361 	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
2362 	if (!mrioc->num_queues)
2363 		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2364 	/*
2365 	 * During reset set the num_queues to the number of queues
2366 	 * that was set before the reset.
2367 	 */
2368 	num_queues = mrioc->num_op_reply_q ?
2369 	    mrioc->num_op_reply_q : mrioc->num_queues;
2370 	ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2371 	    num_queues);
2372 
2373 	if (!mrioc->req_qinfo) {
2374 		mrioc->req_qinfo = kcalloc(num_queues,
2375 		    sizeof(struct op_req_qinfo), GFP_KERNEL);
2376 		if (!mrioc->req_qinfo) {
2377 			retval = -1;
2378 			goto out_failed;
2379 		}
2380 
2381 		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
2382 		    num_queues, GFP_KERNEL);
2383 		if (!mrioc->op_reply_qinfo) {
2384 			retval = -1;
2385 			goto out_failed;
2386 		}
2387 	}
2388 
2389 	if (mrioc->enable_segqueue)
2390 		ioc_info(mrioc,
2391 		    "allocating operational queues through segmented queues\n");
2392 
2393 	for (i = 0; i < num_queues; i++) {
2394 		if (mpi3mr_create_op_reply_q(mrioc, i)) {
2395 			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2396 			break;
2397 		}
2398 		if (mpi3mr_create_op_req_q(mrioc, i,
2399 		    mrioc->op_reply_qinfo[i].qid)) {
2400 			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2401 			mpi3mr_delete_op_reply_q(mrioc, i);
2402 			break;
2403 		}
2404 	}
2405 
2406 	if (i == 0) {
2407 		/* Not even one queue is created successfully*/
2408 		retval = -1;
2409 		goto out_failed;
2410 	}
2411 	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2412 	ioc_info(mrioc,
2413 	    "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2414 	    mrioc->num_op_reply_q, mrioc->default_qcount,
2415 	    mrioc->active_poll_qcount);
2416 
2417 	return retval;
2418 out_failed:
2419 	kfree(mrioc->req_qinfo);
2420 	mrioc->req_qinfo = NULL;
2421 
2422 	kfree(mrioc->op_reply_qinfo);
2423 	mrioc->op_reply_qinfo = NULL;
2424 
2425 	return retval;
2426 }
2427 
2428 /**
2429  * mpi3mr_op_request_post - Post request to operational queue
2430  * @mrioc: Adapter reference
2431  * @op_req_q: Operational request queue info
2432  * @req: MPI3 request
2433  *
2434  * Post the MPI3 request into operational request queue and
2435  * inform the controller, if the queue is full return
2436  * appropriate error.
2437  *
2438  * Return: 0 on success, non-zero on failure.
2439  */
int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
	struct op_req_qinfo *op_req_q, u8 *req)
{
	u16 pi = 0, max_entries, reply_qidx = 0, midx;
	int retval = 0;
	unsigned long flags;
	u8 *req_entry;
	void *segment_base_addr;
	u16 req_sz = mrioc->facts.op_req_sz;
	struct segments *segments = op_req_q->q_segments;
	struct op_reply_qinfo *op_reply_q = NULL;

	/* Request and reply queues are paired; reply qids are 1-based */
	reply_qidx = op_req_q->reply_qid - 1;
	op_reply_q = mrioc->op_reply_qinfo + reply_qidx;

	if (mrioc->unrecoverable)
		return -EFAULT;

	/* The queue lock serializes producers and protects pi */
	spin_lock_irqsave(&op_req_q->q_lock, flags);
	pi = op_req_q->pi;
	max_entries = op_req_q->num_requests;

	if (mpi3mr_check_req_qfull(op_req_q)) {
		/*
		 * Queue looks full: drain the paired reply queue once (which
		 * advances the consumer index) and re-check before giving up.
		 */
		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
		    reply_qidx, mrioc->op_reply_q_offset);
		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			retval = -EAGAIN;
			goto out;
		}
	}

	if (mrioc->reset_in_progress) {
		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}
	if (mrioc->pci_err_recovery) {
		ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Reply queue is nearing to get full, push back IOs to SML */
	if ((mrioc->prevent_reply_qfull == true) &&
		(atomic_read(&op_reply_q->pend_ios) >
	     (op_reply_q->qfull_watermark))) {
		atomic_inc(&mrioc->reply_qfull_count);
		retval = -EAGAIN;
		goto out;
	}

	/* Locate the slot for the producer index within its segment */
	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
	req_entry = (u8 *)segment_base_addr +
	    ((pi % op_req_q->segment_qd) * req_sz);

	/* Copy one admin-frame worth of request; rest of the slot is zeroed */
	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);

	/* Advance the producer index with wrap-around */
	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

#ifndef CONFIG_PREEMPT_RT
	/* Switch to IRQ polling once enough IOs are pending completion */
	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
#else
	atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
#endif

	/* Ring the doorbell: publish the new producer index to the IOC */
	writel(op_req_q->pi,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);

out:
	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
	return retval;
}
2519 
2520 /**
2521  * mpi3mr_check_rh_fault_ioc - check reset history and fault
2522  * controller
2523  * @mrioc: Adapter instance reference
2524  * @reason_code: reason code for the fault.
2525  *
2526  * This routine will save snapdump and fault the controller with
2527  * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeouts as in those cases
2530  * immediate soft reset invocation is not required.
2531  *
2532  * Return:  None.
2533  */
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
	u32 ioc_status, host_diagnostic, timeout;
	union mpi3mr_trigger_data trigger_data;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "controller is unrecoverable\n");
		return;
	}

	/* A missing PCI device cannot be recovered; mark it so */
	if (!pci_device_is_present(mrioc->pdev)) {
		mrioc->unrecoverable = 1;
		ioc_err(mrioc, "controller is not present\n");
		return;
	}
	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	/* Already reset or faulted: record the trigger, don't fault again */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		return;
	} else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
		trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
		      MPI3_SYSIF_FAULT_CODE_MASK);

		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		mpi3mr_print_fault_info(mrioc);
		return;
	}

	/* Force a diag fault with the given reason code */
	mpi3mr_set_diagsave(mrioc);
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    reason_code);
	trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
		      MPI3_SYSIF_FAULT_CODE_MASK);
	mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT,
	    &trigger_data, 0);
	/* Poll in 100ms steps until the firmware finishes its diag save */
	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	do {
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
			break;
		msleep(100);
	} while (--timeout);
}
2581 
2582 /**
2583  * mpi3mr_sync_timestamp - Issue time stamp sync request
2584  * @mrioc: Adapter reference
2585  *
 * Issue IO unit control MPI request to synchronize firmware
2587  * timestamp with host time.
2588  *
2589  * Return: 0 on success, non-zero on failure.
2590  */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	/* init_cmds is a single-slot command context shared by init paths */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Send the host wall-clock time in milliseconds */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Don't fault the IOC when the timeout raced with a reset */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}
2650 
2651 /**
2652  * mpi3mr_print_pkg_ver - display controller fw package version
2653  * @mrioc: Adapter reference
2654  *
2655  * Retrieve firmware package version from the component image
2656  * header of the controller flash and display it.
2657  *
2658  * Return: 0 on success and non-zero on failure.
2659  */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA buffer to receive the manifest uploaded by the firmware */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	/* init_cmds is a single-slot command context shared by init paths */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	/* The manifest lives right after the image header in flash */
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller so recovery can follow */
		ioc_err(mrioc, "get package version timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	/* Printing the version is best-effort: only on an MPI manifest */
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}
2735 
2736 /**
2737  * mpi3mr_watchdog_work - watchdog thread to monitor faults
2738  * @work: work struct
2739  *
 * Watchdog work periodically executed (1 second interval) to
2741  * monitor firmware fault and to issue periodic timer sync to
2742  * the firmware.
2743  *
2744  * Return: Nothing.
2745  */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 host_diagnostic, ioc_status;
	union mpi3mr_trigger_data trigger_data;
	u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;

	/* Another recovery path owns the controller; do not interfere */
	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return;

	/* Surprise-removal check: the device vanished from the PCI bus */
	if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
		ioc_err(mrioc, "watchdog could not detect the controller\n");
		mrioc->unrecoverable = 1;
	}

	if (mrioc->unrecoverable) {
		ioc_err(mrioc,
		    "flush pending commands for unrecoverable controller\n");
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		/* Do not re-arm: the watchdog stops for a dead controller */
		return;
	}

	/* Drain admin replies left behind by an unprocessed interrupt */
	if (atomic_read(&mrioc->admin_pend_isr)) {
		ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
				"flush admin replies\n");
		mpi3mr_process_admin_reply_q(mrioc);
	}

	/* Periodic timestamp sync, only when this host supervises the IOC */
	if (!(mrioc->facts.ioc_capabilities &
		MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
		(mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {

		mrioc->ts_update_counter = 0;
		mpi3mr_sync_timestamp(mrioc);
	}

	/* Firmware asked to prepare for reset but never followed through */
	if ((mrioc->prepare_for_reset) &&
	    ((mrioc->prepare_for_reset_timeout_counter++) >=
	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
		return;
	}

	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* Reset-history bit set means firmware reset the IOC on its own */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
		return;
	}

	/*Check for fault state every one second and issue Soft reset*/
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state != MRIOC_STATE_FAULT)
		goto schedule_work;

	trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
	/* Give an in-progress firmware diag save time to finish before reset */
	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
		if (!mrioc->diagsave_timeout) {
			mpi3mr_print_fault_info(mrioc);
			ioc_warn(mrioc, "diag save in progress\n");
		}
		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
			goto schedule_work;
	}

	mpi3mr_print_fault_info(mrioc);
	mrioc->diagsave_timeout = 0;

	if (!mpi3mr_is_fault_recoverable(mrioc)) {
		mrioc->unrecoverable = 1;
		goto schedule_work;
	}

	/* Select the recovery action based on the reported fault code */
	switch (trigger_data.fault) {
	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
		ioc_warn(mrioc,
		    "controller requires system power cycle, marking controller as unrecoverable\n");
		mrioc->unrecoverable = 1;
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
		/* A reset is already underway elsewhere; just keep watching */
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
		break;
	default:
		break;
	}
	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
	return;

schedule_work:
	/* Re-arm the watchdog unless mpi3mr_stop_watchdog() tore down the queue */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	return;
}
2855 
2856 /**
2857  * mpi3mr_start_watchdog - Start watchdog
2858  * @mrioc: Adapter instance reference
2859  *
2860  * Create and start the watchdog thread to monitor controller
2861  * faults.
2862  *
2863  * Return: Nothing.
2864  */
mpi3mr_start_watchdog(struct mpi3mr_ioc * mrioc)2865 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2866 {
2867 	if (mrioc->watchdog_work_q)
2868 		return;
2869 
2870 	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2871 	snprintf(mrioc->watchdog_work_q_name,
2872 	    sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2873 	    mrioc->id);
2874 	mrioc->watchdog_work_q = alloc_ordered_workqueue(
2875 		"%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name);
2876 	if (!mrioc->watchdog_work_q) {
2877 		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2878 		return;
2879 	}
2880 
2881 	if (mrioc->watchdog_work_q)
2882 		queue_delayed_work(mrioc->watchdog_work_q,
2883 		    &mrioc->watchdog_work,
2884 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2885 }
2886 
2887 /**
2888  * mpi3mr_stop_watchdog - Stop watchdog
2889  * @mrioc: Adapter instance reference
2890  *
2891  * Stop the watchdog thread created to monitor controller
2892  * faults.
2893  *
2894  * Return: Nothing.
2895  */
mpi3mr_stop_watchdog(struct mpi3mr_ioc * mrioc)2896 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2897 {
2898 	unsigned long flags;
2899 	struct workqueue_struct *wq;
2900 
2901 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2902 	wq = mrioc->watchdog_work_q;
2903 	mrioc->watchdog_work_q = NULL;
2904 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2905 	if (wq) {
2906 		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2907 			flush_workqueue(wq);
2908 		destroy_workqueue(wq);
2909 	}
2910 }
2911 
2912 /**
2913  * mpi3mr_setup_admin_qpair - Setup admin queue pair
2914  * @mrioc: Adapter instance reference
2915  *
2916  * Allocate memory for admin queue pair if required and register
2917  * the admin queue with the controller.
2918  *
2919  * Return: 0 on success, non-zero on failures.
2920  */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	/* Queue depths are fixed: total queue size divided by frame size */
	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	/* Expected phase bit starts at 1 for a freshly registered reply queue */
	mrioc->admin_reply_ephase = 1;
	atomic_set(&mrioc->admin_reply_q_in_use, 0);
	atomic_set(&mrioc->admin_pend_isr, 0);

	/* Allocate DMA-coherent queue memory only if not already allocated */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/*
	 * Register the queue pair with the controller: depths (replies in
	 * the upper 16 bits, requests in the lower), then the 64-bit base
	 * addresses, then the initial producer/consumer indices.
	 */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
		&mrioc->sysif_regs->admin_request_queue_address,
		&mrioc->adm_req_q_bar_writeq_lock);
	mpi3mr_writeq(mrioc->admin_reply_dma,
		&mrioc->sysif_regs->admin_reply_queue_address,
		&mrioc->adm_reply_q_bar_writeq_lock);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	/* Free whichever queue buffers were allocated before the failure */
	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}
2985 
2986 /**
2987  * mpi3mr_issue_iocfacts - Send IOC Facts
2988  * @mrioc: Adapter instance reference
2989  * @facts_data: Cached IOC facts data
2990  *
2991  * Issue IOC Facts MPI request through admin queue and wait for
2992  * the completion of it or time out.
2993  *
2994  * Return: 0 on success, non-zero on failures.
2995  */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able buffer the controller writes the facts data into */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	/* init_cmds supports only one outstanding internal command at a time */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	/* Single simple SGE pointing at the response buffer */
	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		/* On timeout, check for a controller fault and handle reset */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Copy out the response and cache it in driver-native form */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}
3068 
3069 /**
 * mpi3mr_check_reset_dma_mask - Check and update the DMA mask
3071  * @mrioc: Adapter instance reference
3072  *
3073  * Check whether the new DMA mask requested through IOCFacts by
3074  * firmware needs to be set, if so set it .
3075  *
3076  * Return: 0 on success, non-zero on failure.
3077  */
mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc * mrioc)3078 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
3079 {
3080 	struct pci_dev *pdev = mrioc->pdev;
3081 	int r;
3082 	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
3083 
3084 	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
3085 		return 0;
3086 
3087 	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
3088 	    mrioc->dma_mask, facts_dma_mask);
3089 
3090 	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
3091 	if (r) {
3092 		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
3093 		    facts_dma_mask, r);
3094 		return r;
3095 	}
3096 	mrioc->dma_mask = facts_dma_mask;
3097 	return r;
3098 }
3099 
3100 /**
3101  * mpi3mr_process_factsdata - Process IOC facts data
3102  * @mrioc: Adapter instance reference
3103  * @facts_data: Cached IOC facts data
3104  *
3105  * Convert IOC facts data into cpu endianness and cache it in
3106  * the driver .
3107  *
3108  * Return: Nothing.
3109  */
mpi3mr_process_factsdata(struct mpi3mr_ioc * mrioc,struct mpi3_ioc_facts_data * facts_data)3110 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
3111 	struct mpi3_ioc_facts_data *facts_data)
3112 {
3113 	u32 ioc_config, req_sz, facts_flags;
3114 
3115 	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
3116 	    (sizeof(*facts_data) / 4)) {
3117 		ioc_warn(mrioc,
3118 		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
3119 		    sizeof(*facts_data),
3120 		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
3121 	}
3122 
3123 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3124 	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
3125 	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
3126 	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
3127 		ioc_err(mrioc,
3128 		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
3129 		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
3130 	}
3131 
3132 	memset(&mrioc->facts, 0, sizeof(mrioc->facts));
3133 
3134 	facts_flags = le32_to_cpu(facts_data->flags);
3135 	mrioc->facts.op_req_sz = req_sz;
3136 	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
3137 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
3138 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
3139 
3140 	mrioc->facts.ioc_num = facts_data->ioc_number;
3141 	mrioc->facts.who_init = facts_data->who_init;
3142 	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
3143 	mrioc->facts.personality = (facts_flags &
3144 	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
3145 	mrioc->facts.dma_mask = (facts_flags &
3146 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3147 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3148 	mrioc->facts.dma_mask = (facts_flags &
3149 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3150 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3151 	mrioc->facts.protocol_flags = facts_data->protocol_flags;
3152 	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
3153 	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
3154 	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
3155 	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
3156 	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
3157 	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
3158 	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
3159 	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
3160 	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
3161 	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
3162 	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
3163 	mrioc->facts.max_pcie_switches =
3164 	    le16_to_cpu(facts_data->max_pcie_switches);
3165 	mrioc->facts.max_sasexpanders =
3166 	    le16_to_cpu(facts_data->max_sas_expanders);
3167 	mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
3168 	mrioc->facts.max_sasinitiators =
3169 	    le16_to_cpu(facts_data->max_sas_initiators);
3170 	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
3171 	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
3172 	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
3173 	mrioc->facts.max_op_req_q =
3174 	    le16_to_cpu(facts_data->max_operational_request_queues);
3175 	mrioc->facts.max_op_reply_q =
3176 	    le16_to_cpu(facts_data->max_operational_reply_queues);
3177 	mrioc->facts.ioc_capabilities =
3178 	    le32_to_cpu(facts_data->ioc_capabilities);
3179 	mrioc->facts.fw_ver.build_num =
3180 	    le16_to_cpu(facts_data->fw_version.build_num);
3181 	mrioc->facts.fw_ver.cust_id =
3182 	    le16_to_cpu(facts_data->fw_version.customer_id);
3183 	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
3184 	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
3185 	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
3186 	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
3187 	mrioc->msix_count = min_t(int, mrioc->msix_count,
3188 	    mrioc->facts.max_msix_vectors);
3189 	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
3190 	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
3191 	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
3192 	mrioc->facts.shutdown_timeout =
3193 	    le16_to_cpu(facts_data->shutdown_timeout);
3194 	mrioc->facts.diag_trace_sz =
3195 	    le32_to_cpu(facts_data->diag_trace_size);
3196 	mrioc->facts.diag_fw_sz =
3197 	    le32_to_cpu(facts_data->diag_fw_size);
3198 	mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size);
3199 	mrioc->facts.max_dev_per_tg =
3200 	    facts_data->max_devices_per_throttle_group;
3201 	mrioc->facts.io_throttle_data_length =
3202 	    le16_to_cpu(facts_data->io_throttle_data_length);
3203 	mrioc->facts.max_io_throttle_group =
3204 	    le16_to_cpu(facts_data->max_io_throttle_group);
3205 	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
3206 	mrioc->facts.io_throttle_high =
3207 	    le16_to_cpu(facts_data->io_throttle_high);
3208 
3209 	if (mrioc->facts.max_data_length ==
3210 	    MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
3211 		mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
3212 	else
3213 		mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
3214 	/* Store in 512b block count */
3215 	if (mrioc->facts.io_throttle_data_length)
3216 		mrioc->io_throttle_data_length =
3217 		    (mrioc->facts.io_throttle_data_length * 2 * 4);
3218 	else
3219 		/* set the length to 1MB + 1K to disable throttle */
3220 		mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
3221 
3222 	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
3223 	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
3224 
3225 	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
3226 	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
3227 	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
3228 	ioc_info(mrioc,
3229 	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
3230 	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
3231 	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
3232 	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
3233 	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
3234 	    mrioc->facts.sge_mod_shift);
3235 	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
3236 	    mrioc->facts.dma_mask, (facts_flags &
3237 	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
3238 	ioc_info(mrioc,
3239 	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
3240 	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
3241 	ioc_info(mrioc,
3242 	   "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
3243 	   mrioc->facts.io_throttle_data_length * 4,
3244 	   mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
3245 }
3246 
3247 /**
 * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
3249  * @mrioc: Adapter instance reference
3250  *
3251  * Allocate and initialize the reply free buffers, sense
3252  * buffers, reply free queue and sense buffer queue.
3253  *
3254  * Return: 0 on success, non-zero on failures.
3255  */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* Already allocated (e.g. on a re-initialization path) */
	if (mrioc->init_cmds.reply)
		return retval;

	/* One reply buffer per internal driver command */
	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->bsg_cmds.reply)
		goto out_failed;

	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->transport_cmds.reply)
		goto out_failed;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->evtack_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_cmds.reply)
		goto out_failed;

	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_abort_cmd.reply)
		goto out_failed;

	/* Tracking bitmaps sized by the controller's device handle range */
	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
						 GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
						  GFP_KERNEL);
	if (!mrioc->evtack_cmds_bitmap)
		goto out_failed;

	/* Free queues are one entry deeper than the buffer counts */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	/* Upper bound used to validate reply addresses coming from hardware */
	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool,  4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/*
	 * NOTE(review): partial allocations are not freed here; presumably
	 * the caller's teardown path releases them — confirm against the
	 * driver's free-memory routine.
	 */
	retval = -1;
	return retval;
}
3383 
3384 /**
3385  * mpimr_initialize_reply_sbuf_queues - initialize reply sense
3386  * buffers
3387  * @mrioc: Adapter instance reference
3388  *
3389  * Helper function to initialize reply and sense buffers along
3390  * with some debug prints.
3391  *
3392  * Return:  None.
3393  */
mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc * mrioc)3394 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3395 {
3396 	u32 sz, i;
3397 	dma_addr_t phy_addr;
3398 
3399 	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3400 	ioc_info(mrioc,
3401 	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3402 	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3403 	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3404 	sz = mrioc->reply_free_qsz * 8;
3405 	ioc_info(mrioc,
3406 	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3407 	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3408 	    (unsigned long long)mrioc->reply_free_q_dma);
3409 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3410 	ioc_info(mrioc,
3411 	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3412 	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3413 	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3414 	sz = mrioc->sense_buf_q_sz * 8;
3415 	ioc_info(mrioc,
3416 	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3417 	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3418 	    (unsigned long long)mrioc->sense_buf_q_dma);
3419 
3420 	/* initialize Reply buffer Queue */
3421 	for (i = 0, phy_addr = mrioc->reply_buf_dma;
3422 	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3423 		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3424 	mrioc->reply_free_q[i] = cpu_to_le64(0);
3425 
3426 	/* initialize Sense Buffer Queue */
3427 	for (i = 0, phy_addr = mrioc->sense_buf_dma;
3428 	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3429 		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3430 	mrioc->sense_buf_q[i] = cpu_to_le64(0);
3431 }
3432 
3433 /**
3434  * mpi3mr_issue_iocinit - Send IOC Init
3435  * @mrioc: Adapter instance reference
3436  *
3437  * Issue IOC Init MPI request through admin queue and wait for
3438  * the completion of it or time out.
3439  *
3440  * Return: 0 on success, non-zero on failures.
3441  */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* DMA-able buffer for the driver information page sent to the IOC */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	/* Populate reply/sense free queues before handing them to the IOC */
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* Keep a local copy of what was advertised to the controller */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	/* init_cmds supports only one outstanding internal command at a time */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	/* Hand the reply free queue and sense buffer queue to the IOC */
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* Wall-clock time stamp (milliseconds) for the controller */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	iocinit_req.msg_flags |=
	    MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
	iocinit_req.msg_flags |=
		MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* On timeout, check for a controller fault and handle reset */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Publish all free buffers to the controller via the host indices */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}
3551 
3552 /**
3553  * mpi3mr_unmask_events - Unmask events in event mask bitmap
3554  * @mrioc: Adapter instance reference
3555  * @event: MPI event ID
3556  *
 * Unmask the specific event by clearing its bit in the driver's
 * event_masks bitmap.
 *
 * Return: Nothing.
3561  */
mpi3mr_unmask_events(struct mpi3mr_ioc * mrioc,u16 event)3562 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
3563 {
3564 	u32 desired_event;
3565 	u8 word;
3566 
3567 	if (event >= 128)
3568 		return;
3569 
3570 	desired_event = (1 << (event % 32));
3571 	word = event / 32;
3572 
3573 	mrioc->event_masks[word] &= ~desired_event;
3574 }
3575 
/**
 * mpi3mr_issue_event_notification - Send event notification
 * @mrioc: Adapter instance reference
 *
 * Issue event notification MPI request through admin queue and
 * wait for the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	/* Only one internal init command may be outstanding at a time */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Hand the driver's current event mask bitmap to the firmware */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "event notification timed out\n");
		/* Treat the timeout as a controller fault condition */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3639 
3640 /**
3641  * mpi3mr_process_event_ack - Process event acknowledgment
3642  * @mrioc: Adapter instance reference
3643  * @event: MPI3 event ID
3644  * @event_ctx: event context
3645  *
3646  * Send event acknowledgment through admin queue and wait for
3647  * it to complete.
3648  *
3649  * Return: 0 on success, non-zero on failures.
3650  */
mpi3mr_process_event_ack(struct mpi3mr_ioc * mrioc,u8 event,u32 event_ctx)3651 int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
3652 	u32 event_ctx)
3653 {
3654 	struct mpi3_event_ack_request evtack_req;
3655 	int retval = 0;
3656 
3657 	memset(&evtack_req, 0, sizeof(evtack_req));
3658 	mutex_lock(&mrioc->init_cmds.mutex);
3659 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3660 		retval = -1;
3661 		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
3662 		mutex_unlock(&mrioc->init_cmds.mutex);
3663 		goto out;
3664 	}
3665 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3666 	mrioc->init_cmds.is_waiting = 1;
3667 	mrioc->init_cmds.callback = NULL;
3668 	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3669 	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3670 	evtack_req.event = event;
3671 	evtack_req.event_context = cpu_to_le32(event_ctx);
3672 
3673 	init_completion(&mrioc->init_cmds.done);
3674 	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3675 	    sizeof(evtack_req), 1);
3676 	if (retval) {
3677 		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3678 		goto out_unlock;
3679 	}
3680 	wait_for_completion_timeout(&mrioc->init_cmds.done,
3681 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3682 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3683 		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
3684 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3685 			mpi3mr_check_rh_fault_ioc(mrioc,
3686 			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
3687 		retval = -1;
3688 		goto out_unlock;
3689 	}
3690 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3691 	    != MPI3_IOCSTATUS_SUCCESS) {
3692 		ioc_err(mrioc,
3693 		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3694 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3695 		    mrioc->init_cmds.ioc_loginfo);
3696 		retval = -1;
3697 		goto out_unlock;
3698 	}
3699 
3700 out_unlock:
3701 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3702 	mutex_unlock(&mrioc->init_cmds.mutex);
3703 out:
3704 	return retval;
3705 }
3706 
/**
 * mpi3mr_alloc_chain_bufs - Allocate chain buffers
 * @mrioc: Adapter instance reference
 *
 * Allocate chain buffers and set a bitmap to indicate free
 * chain buffers. Chain buffers are used to pass the SGE
 * information along with MPI3 SCSI IO requests for host I/O.
 *
 * Return: 0 on success, non-zero on failure
 */
static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;
	u16 num_chains;

	/* Already allocated (e.g. re-init after reset) - nothing to do */
	if (mrioc->chain_sgl_list)
		return retval;

	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;

	/* DIX protection I/O may need additional chain frames */
	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
	    | SHOST_DIX_TYPE1_PROTECTION
	    | SHOST_DIX_TYPE2_PROTECTION
	    | SHOST_DIX_TYPE3_PROTECTION))
		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);

	mrioc->chain_buf_count = num_chains;
	sz = sizeof(struct chain_element) * num_chains;
	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
	if (!mrioc->chain_sgl_list)
		goto out_failed;

	/* Cap SGL entries so one chain buffer can map max_data_length */
	if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
		MPI3MR_PAGE_SIZE_4K))
		mrioc->max_sgl_entries = mrioc->facts.max_data_length /
			MPI3MR_PAGE_SIZE_4K;
	sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
	ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
			mrioc->max_sgl_entries, sz/1024);

	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->chain_buf_pool) {
		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	for (i = 0; i < num_chains; i++) {
		mrioc->chain_sgl_list[i].addr =
		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
		    &mrioc->chain_sgl_list[i].dma_addr);

		if (!mrioc->chain_sgl_list[i].addr)
			goto out_failed;
	}
	/* Bitmap tracks which chain buffers are free vs in use */
	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
	if (!mrioc->chain_bitmap)
		goto out_failed;
	return retval;
out_failed:
	/*
	 * NOTE(review): partial allocations are left in place here;
	 * presumably the driver's common teardown path frees them based
	 * on the non-NULL pointers - confirm against the cleanup code.
	 */
	retval = -1;
	return retval;
}
3771 
3772 /**
3773  * mpi3mr_port_enable_complete - Mark port enable complete
3774  * @mrioc: Adapter instance reference
3775  * @drv_cmd: Internal command tracker
3776  *
3777  * Call back for asynchronous port enable request sets the
3778  * driver command to indicate port enable request is complete.
3779  *
3780  * Return: Nothing
3781  */
mpi3mr_port_enable_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)3782 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3783 	struct mpi3mr_drv_cmd *drv_cmd)
3784 {
3785 	drv_cmd->callback = NULL;
3786 	mrioc->scan_started = 0;
3787 	if (drv_cmd->state & MPI3MR_CMD_RESET)
3788 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3789 	else
3790 		mrioc->scan_failed = drv_cmd->ioc_status;
3791 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3792 }
3793 
/**
 * mpi3mr_issue_port_enable - Issue Port Enable
 * @mrioc: Adapter instance reference
 * @async: Flag to wait for completion or not
 *
 * Issue Port Enable MPI request through admin queue and if the
 * async flag is not set wait for the completion of the port
 * enable or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	/* Only one internal init command may be outstanding at a time */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	if (async) {
		/* Completion is reported via the callback, no waiter */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		/*
		 * Async: leave the command PENDING; the callback clears the
		 * state when the firmware completes port enable.
		 */
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	/* Sync path: invoke the completion handler directly */
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3856 
/* Protocol type to name mapper structure */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};

/* Capability to name mapper structure */
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" },
	{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
};
3875 
/**
 * mpi3mr_repost_diag_bufs - repost host diag buffers
 * @mrioc: Adapter instance reference
 *
 * repost firmware and trace diag buffers based on global
 * trigger flag from driver page 2
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	u64 global_trigger;
	union mpi3mr_trigger_data prev_trigger_data;
	struct diag_buffer_desc *trace_hdb = NULL;
	struct diag_buffer_desc *fw_hdb = NULL;
	int retval = 0;
	bool trace_repost_needed = false;
	bool fw_repost_needed = false;
	u8 prev_trigger_type;

	/* Refresh trigger configuration before deciding on reposts */
	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval)
		return -1;

	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
	    MPI3_DIAG_BUFFER_TYPE_TRACE);

	/*
	 * Repost only allocated buffers that were not captured by a
	 * global or element trigger (those captures must be preserved).
	 */
	if (trace_hdb &&
	    trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		trace_repost_needed = true;

	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);

	if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		fw_repost_needed = true;

	/* Driver page 2 global trigger flags can veto either repost */
	if (trace_repost_needed || fw_repost_needed) {
		global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
		if (global_trigger &
		      MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED)
			trace_repost_needed = false;
		if (global_trigger &
		     MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED)
			fw_repost_needed = false;
	}

	if (trace_repost_needed) {
		/* Save trigger state so it can be restored if the post fails */
		prev_trigger_type = trace_hdb->trigger_type;
		memcpy(&prev_trigger_data, &trace_hdb->trigger_data,
		    sizeof(trace_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb);
		if (!retval) {
			dprint_init(mrioc, "trace diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(trace_hdb,
				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			trace_hdb->trigger_type = prev_trigger_type;
			memcpy(&trace_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "trace diag buffer repost failed");
			return -1;
		}
	}

	if (fw_repost_needed) {
		/* Same save/repost/restore sequence for the firmware buffer */
		prev_trigger_type = fw_hdb->trigger_type;
		memcpy(&prev_trigger_data, &fw_hdb->trigger_data,
		    sizeof(fw_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb);
		if (!retval) {
			dprint_init(mrioc, "firmware diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(fw_hdb,
				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			fw_hdb->trigger_type = prev_trigger_type;
			memcpy(&fw_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "firmware diag buffer repost failed");
			return -1;
		}
	}
	return retval;
}
3963 
3964 /**
3965  * mpi3mr_read_tsu_interval - Update time stamp interval
3966  * @mrioc: Adapter instance reference
3967  *
3968  * Update time stamp interval if its defined in driver page 1,
3969  * otherwise use default value.
3970  *
3971  * Return: Nothing
3972  */
3973 static void
mpi3mr_read_tsu_interval(struct mpi3mr_ioc * mrioc)3974 mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
3975 {
3976 	struct mpi3_driver_page1 driver_pg1;
3977 	u16 pg_sz = sizeof(driver_pg1);
3978 	int retval = 0;
3979 
3980 	mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
3981 
3982 	retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
3983 	if (!retval && driver_pg1.time_stamp_update)
3984 		mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
3985 }
3986 
3987 /**
3988  * mpi3mr_print_ioc_info - Display controller information
3989  * @mrioc: Adapter instance reference
3990  *
3991  * Display controller personality, capability, supported
3992  * protocols etc.
3993  *
3994  * Return: Nothing
3995  */
3996 static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc * mrioc)3997 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3998 {
3999 	int i = 0, bytes_written = 0;
4000 	const char *personality;
4001 	char protocol[50] = {0};
4002 	char capabilities[100] = {0};
4003 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
4004 
4005 	switch (mrioc->facts.personality) {
4006 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
4007 		personality = "Enhanced HBA";
4008 		break;
4009 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
4010 		personality = "RAID";
4011 		break;
4012 	default:
4013 		personality = "Unknown";
4014 		break;
4015 	}
4016 
4017 	ioc_info(mrioc, "Running in %s Personality", personality);
4018 
4019 	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
4020 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
4021 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
4022 
4023 	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
4024 		if (mrioc->facts.protocol_flags &
4025 		    mpi3mr_protocols[i].protocol) {
4026 			bytes_written += scnprintf(protocol + bytes_written,
4027 				    sizeof(protocol) - bytes_written, "%s%s",
4028 				    bytes_written ? "," : "",
4029 				    mpi3mr_protocols[i].name);
4030 		}
4031 	}
4032 
4033 	bytes_written = 0;
4034 	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
4035 		if (mrioc->facts.protocol_flags &
4036 		    mpi3mr_capabilities[i].capability) {
4037 			bytes_written += scnprintf(capabilities + bytes_written,
4038 				    sizeof(capabilities) - bytes_written, "%s%s",
4039 				    bytes_written ? "," : "",
4040 				    mpi3mr_capabilities[i].name);
4041 		}
4042 	}
4043 
4044 	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
4045 		 protocol, capabilities);
4046 }
4047 
4048 /**
4049  * mpi3mr_cleanup_resources - Free PCI resources
4050  * @mrioc: Adapter instance reference
4051  *
4052  * Unmap PCI device memory and disable PCI device.
4053  *
4054  * Return: 0 on success and non-zero on failure.
4055  */
mpi3mr_cleanup_resources(struct mpi3mr_ioc * mrioc)4056 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
4057 {
4058 	struct pci_dev *pdev = mrioc->pdev;
4059 
4060 	mpi3mr_cleanup_isr(mrioc);
4061 
4062 	if (mrioc->sysif_regs) {
4063 		iounmap((void __iomem *)mrioc->sysif_regs);
4064 		mrioc->sysif_regs = NULL;
4065 	}
4066 
4067 	if (pci_is_enabled(pdev)) {
4068 		if (mrioc->bars)
4069 			pci_release_selected_regions(pdev, mrioc->bars);
4070 		pci_disable_device(pdev);
4071 	}
4072 }
4073 
/**
 * mpi3mr_setup_resources - Enable PCI resources
 * @mrioc: Adapter instance reference
 *
 * Enable PCI device memory, MSI-x registers and set DMA mask.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/* Prefer a caller-provided mask, else 64-bit when dma_addr_t allows */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* The controller is driven via MSI-X; bail out if absent */
	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* Map the first memory BAR as the system interface register space */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	/* Try the wide mask first, falling back to 32-bit DMA if needed */
	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		if (dma_mask != DMA_BIT_MASK(32)) {
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/* MSI-X table size = (Message Control low 11 bits) + 1 */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);

	/* Reserve vectors for poll queues; two are kept for admin/default */
	if (!reset_devices && poll_queues > 0)
		mrioc->requested_poll_qcount = min_t(int, poll_queues,
				mrioc->msix_count - 2);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}
4171 
/**
 * mpi3mr_enable_events - Enable required events
 * @mrioc: Adapter instance reference
 *
 * This routine unmasks the events required by the driver by
 * sending appropriate event mask bitmap through an event
 * notification request.
 *
 * Return: 0 on success and non-zero on failure.
 */
static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32  i;

	/* Mask everything first, then unmask only the events of interest */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mrioc->event_masks[i] = -1;

	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE);

	retval = mpi3mr_issue_event_notification(mrioc);
	if (retval)
		ioc_err(mrioc, "failed to issue event notification %d\n",
		    retval);
	return retval;
}
4212 
/**
 * mpi3mr_init_ioc - Initialize the controller
 * @mrioc: Adapter instance reference
 *
 * This is the controller initialization routine, executed either
 * after soft reset or from pci probe callback.
 * Setup the required resources, memory map the controller
 * registers, create admin and operational reply queue pairs,
 * allocate required memory for reply pool, sense buffer pool,
 * issue IOC init request to the firmware, unmask the events and
 * issue port enable to discover SAS/SATA/NVMe devices and RAID
 * volumes.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 sz;

retry_init:
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* Single-vector ISR first; multi-vector setup happens post IOC init */
	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	/* Derive driver limits from the reported IOC facts */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
	atomic_set(&mrioc->pend_large_data_sz, 0);

	/* Kdump kernels run with a reduced outstanding I/O budget */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	if (!(mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
		mrioc->sas_transport_enabled = 1;
		mrioc->scsi_device_channel = 1;
		mrioc->shost->max_channel = 1;
		mrioc->shost->transportt = mpi3mr_transport_template;
	}

	if (mrioc->facts.max_req_limit)
		mrioc->prevent_reply_qfull = true;

	if (mrioc->facts.ioc_capabilities &
		MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
		mrioc->seg_tb_support = true;

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	dprint_init(mrioc, "allocating host diag buffers\n");
	mpi3mr_alloc_diag_bufs(mrioc);

	dprint_init(mrioc, "allocating ioctl dma buffers\n");
	mpi3mr_alloc_ioctl_dma_memory(mrioc);

	/* Diag buffer posting is best-effort; init proceeds on failure */
	dprint_init(mrioc, "posting host diag buffers\n");
	retval = mpi3mr_post_diag_bufs(mrioc);

	if (retval)
		ioc_warn(mrioc, "failed to post host diag buffers\n");

	if (!mrioc->init_cmds.reply) {
		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc,
			    "%s :Failed to allocated reply sense buffers %d\n",
			    __func__, retval);
			goto out_failed_noretry;
		}
	}

	if (!mrioc->chain_sgl_list) {
		retval = mpi3mr_alloc_chain_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
			    retval);
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Switch to the full multi-vector ISR configuration */
	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	if (!mrioc->pel_seqnum_virt) {
		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
		dprint_init(mrioc, "allocating memory for throttle groups\n");
		sz = sizeof(struct mpi3mr_throttle_group_info);
		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
		if (!mrioc->throttle_groups) {
			retval = -1;
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval) {
		ioc_err(mrioc, "failed to refresh triggers\n");
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/* Retryable failures restart initialization up to two more times */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4404 
4405 /**
4406  * mpi3mr_reinit_ioc - Re-Initialize the controller
4407  * @mrioc: Adapter instance reference
4408  * @is_resume: Called from resume or reset path
4409  *
4410  * This the controller re-initialization routine, executed from
4411  * the soft reset handler or resume callback. Creates
4412  * operational reply queue pairs, allocate required memory for
4413  * reply pool, sense buffer pool, issue IOC init request to the
4414  * firmware, unmask the events and issue port enable to discover
4415  * SAS/SATA/NVMe devices and RAID volumes.
4416  *
4417  * Return: 0 on success and non-zero on failure.
4418  */
mpi3mr_reinit_ioc(struct mpi3mr_ioc * mrioc,u8 is_resume)4419 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
4420 {
4421 	int retval = 0;
4422 	u8 retry = 0;
4423 	struct mpi3_ioc_facts_data facts_data;
4424 	u32 pe_timeout, ioc_status;
4425 
4426 retry_init:
4427 	pe_timeout =
4428 	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
4429 
4430 	dprint_reset(mrioc, "bringing up the controller to ready state\n");
4431 	retval = mpi3mr_bring_ioc_ready(mrioc);
4432 	if (retval) {
4433 		ioc_err(mrioc, "failed to bring to ready state\n");
4434 		goto out_failed_noretry;
4435 	}
4436 
4437 	mrioc->io_admin_reset_sync = 0;
4438 	if (is_resume || mrioc->block_on_pci_err) {
4439 		dprint_reset(mrioc, "setting up single ISR\n");
4440 		retval = mpi3mr_setup_isr(mrioc, 1);
4441 		if (retval) {
4442 			ioc_err(mrioc, "failed to setup ISR\n");
4443 			goto out_failed_noretry;
4444 		}
4445 	} else
4446 		mpi3mr_ioc_enable_intr(mrioc);
4447 
4448 	dprint_reset(mrioc, "getting ioc_facts\n");
4449 	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
4450 	if (retval) {
4451 		ioc_err(mrioc, "failed to get ioc_facts\n");
4452 		goto out_failed;
4453 	}
4454 
4455 	dprint_reset(mrioc, "validating ioc_facts\n");
4456 	retval = mpi3mr_revalidate_factsdata(mrioc);
4457 	if (retval) {
4458 		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
4459 		goto out_failed_noretry;
4460 	}
4461 
4462 	mpi3mr_read_tsu_interval(mrioc);
4463 	mpi3mr_print_ioc_info(mrioc);
4464 
4465 	if (is_resume) {
4466 		dprint_reset(mrioc, "posting host diag buffers\n");
4467 		retval = mpi3mr_post_diag_bufs(mrioc);
4468 		if (retval)
4469 			ioc_warn(mrioc, "failed to post host diag buffers\n");
4470 	} else {
4471 		retval = mpi3mr_repost_diag_bufs(mrioc);
4472 		if (retval)
4473 			ioc_warn(mrioc, "failed to re post host diag buffers\n");
4474 	}
4475 
4476 	dprint_reset(mrioc, "sending ioc_init\n");
4477 	retval = mpi3mr_issue_iocinit(mrioc);
4478 	if (retval) {
4479 		ioc_err(mrioc, "failed to send ioc_init\n");
4480 		goto out_failed;
4481 	}
4482 
4483 	dprint_reset(mrioc, "getting package version\n");
4484 	retval = mpi3mr_print_pkg_ver(mrioc);
4485 	if (retval) {
4486 		ioc_err(mrioc, "failed to get package version\n");
4487 		goto out_failed;
4488 	}
4489 
4490 	if (is_resume || mrioc->block_on_pci_err) {
4491 		dprint_reset(mrioc, "setting up multiple ISR\n");
4492 		retval = mpi3mr_setup_isr(mrioc, 0);
4493 		if (retval) {
4494 			ioc_err(mrioc, "failed to re-setup ISR\n");
4495 			goto out_failed_noretry;
4496 		}
4497 	}
4498 
4499 	dprint_reset(mrioc, "creating operational queue pairs\n");
4500 	retval = mpi3mr_create_op_queues(mrioc);
4501 	if (retval) {
4502 		ioc_err(mrioc, "failed to create operational queue pairs\n");
4503 		goto out_failed;
4504 	}
4505 
4506 	if (!mrioc->pel_seqnum_virt) {
4507 		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
4508 		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
4509 		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
4510 		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
4511 		    GFP_KERNEL);
4512 		if (!mrioc->pel_seqnum_virt) {
4513 			retval = -ENOMEM;
4514 			goto out_failed_noretry;
4515 		}
4516 	}
4517 
4518 	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
4519 		ioc_err(mrioc,
4520 		    "cannot create minimum number of operational queues expected:%d created:%d\n",
4521 		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
4522 		retval = -1;
4523 		goto out_failed_noretry;
4524 	}
4525 
4526 	dprint_reset(mrioc, "enabling events\n");
4527 	retval = mpi3mr_enable_events(mrioc);
4528 	if (retval) {
4529 		ioc_err(mrioc, "failed to enable events\n");
4530 		goto out_failed;
4531 	}
4532 
4533 	mrioc->device_refresh_on = 1;
4534 	mpi3mr_add_event_wait_for_device_refresh(mrioc);
4535 
4536 	ioc_info(mrioc, "sending port enable\n");
4537 	retval = mpi3mr_issue_port_enable(mrioc, 1);
4538 	if (retval) {
4539 		ioc_err(mrioc, "failed to issue port enable\n");
4540 		goto out_failed;
4541 	}
4542 	do {
4543 		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
4544 		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
4545 			break;
4546 		if (!pci_device_is_present(mrioc->pdev))
4547 			mrioc->unrecoverable = 1;
4548 		if (mrioc->unrecoverable) {
4549 			retval = -1;
4550 			goto out_failed_noretry;
4551 		}
4552 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4553 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4554 		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4555 			mpi3mr_print_fault_info(mrioc);
4556 			mrioc->init_cmds.is_waiting = 0;
4557 			mrioc->init_cmds.callback = NULL;
4558 			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4559 			goto out_failed;
4560 		}
4561 	} while (--pe_timeout);
4562 
4563 	if (!pe_timeout) {
4564 		ioc_err(mrioc, "port enable timed out\n");
4565 		mpi3mr_check_rh_fault_ioc(mrioc,
4566 		    MPI3MR_RESET_FROM_PE_TIMEOUT);
4567 		mrioc->init_cmds.is_waiting = 0;
4568 		mrioc->init_cmds.callback = NULL;
4569 		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4570 		goto out_failed;
4571 	} else if (mrioc->scan_failed) {
4572 		ioc_err(mrioc,
4573 		    "port enable failed with status=0x%04x\n",
4574 		    mrioc->scan_failed);
4575 	} else
4576 		ioc_info(mrioc, "port enable completed successfully\n");
4577 
4578 	ioc_info(mrioc, "controller %s completed successfully\n",
4579 	    (is_resume)?"resume":"re-initialization");
4580 	return retval;
4581 out_failed:
4582 	if (retry < 2) {
4583 		retry++;
4584 		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
4585 		    (is_resume)?"resume":"re-initialization", retry);
4586 		mpi3mr_memset_buffers(mrioc);
4587 		goto retry_init;
4588 	}
4589 	retval = -1;
4590 out_failed_noretry:
4591 	ioc_err(mrioc, "controller %s is failed\n",
4592 	    (is_resume)?"resume":"re-initialization");
4593 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
4594 	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
4595 	mrioc->unrecoverable = 1;
4596 	return retval;
4597 }
4598 
4599 /**
4600  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4601  *					segments
4602  * @mrioc: Adapter instance reference
4603  * @qidx: Operational reply queue index
4604  *
4605  * Return: Nothing.
4606  */
static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *reply_q = &mrioc->op_reply_qinfo[qidx];
	struct segments *seg = reply_q->q_segments;
	int seg_idx;
	int seg_sz;

	/* Nothing to clear if the queue segments were never allocated */
	if (!seg)
		return;

	/* Each segment holds segment_qd reply descriptors */
	seg_sz = reply_q->segment_qd * mrioc->op_reply_desc_sz;
	for (seg_idx = 0; seg_idx < reply_q->num_segments; seg_idx++)
		memset(seg[seg_idx].segment, 0, seg_sz);
}
4621 
4622 /**
4623  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4624  *					segments
4625  * @mrioc: Adapter instance reference
4626  * @qidx: Operational request queue index
4627  *
4628  * Return: Nothing.
4629  */
static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *req_q = &mrioc->req_qinfo[qidx];
	struct segments *seg = req_q->q_segments;
	int seg_idx;
	int seg_sz;

	/* Nothing to clear if the queue segments were never allocated */
	if (!seg)
		return;

	/* Each segment holds segment_qd request frames */
	seg_sz = req_q->segment_qd * mrioc->facts.op_req_sz;
	for (seg_idx = 0; seg_idx < req_q->num_segments; seg_idx++)
		memset(seg[seg_idx].segment, 0, seg_sz);
}
4644 
4645 /**
4646  * mpi3mr_memset_buffers - memset memory for a controller
4647  * @mrioc: Adapter instance reference
4648  *
4649  * clear all the memory allocated for a controller, typically
4650  * called post reset to reuse the memory allocated during the
4651  * controller init.
4652  *
4653  * Return: Nothing.
4654  */
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_throttle_group_info *tg;

	mrioc->change_count = 0;
	mrioc->active_poll_qcount = 0;
	mrioc->default_qcount = 0;
	/* Clear admin queues so they can be reposted from a clean state */
	if (mrioc->admin_req_base)
		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
	if (mrioc->admin_reply_base)
		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
	atomic_set(&mrioc->admin_reply_q_in_use, 0);
	atomic_set(&mrioc->admin_pend_isr, 0);

	/*
	 * init_cmds.reply is used as the allocation marker: the other
	 * internal-command reply buffers and the bitmaps cleared in this
	 * branch are presumably allocated together with it during init —
	 * NOTE(review): confirm against the allocation path.
	 */
	if (mrioc->init_cmds.reply) {
		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
		memset(mrioc->bsg_cmds.reply, 0,
		    sizeof(*mrioc->bsg_cmds.reply));
		memset(mrioc->host_tm_cmds.reply, 0,
		    sizeof(*mrioc->host_tm_cmds.reply));
		memset(mrioc->pel_cmds.reply, 0,
		    sizeof(*mrioc->pel_cmds.reply));
		memset(mrioc->pel_abort_cmd.reply, 0,
		    sizeof(*mrioc->pel_abort_cmd.reply));
		memset(mrioc->transport_cmds.reply, 0,
		    sizeof(*mrioc->transport_cmds.reply));
		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
			memset(mrioc->evtack_cmds[i].reply, 0,
			    sizeof(*mrioc->evtack_cmds[i].reply));
		/* Drop all pending device-removal and event-ack bookkeeping */
		bitmap_clear(mrioc->removepend_bitmap, 0,
			     mrioc->dev_handle_bitmap_bits);
		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
			     MPI3MR_NUM_EVTACKCMD);
	}

	/* Reset per-queue state and zero the queue segment memory */
	for (i = 0; i < mrioc->num_queues; i++) {
		mrioc->op_reply_qinfo[i].qid = 0;
		mrioc->op_reply_qinfo[i].ci = 0;
		mrioc->op_reply_qinfo[i].num_replies = 0;
		mrioc->op_reply_qinfo[i].ephase = 0;
		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
		mpi3mr_memset_op_reply_q_buffers(mrioc, i);

		mrioc->req_qinfo[i].ci = 0;
		mrioc->req_qinfo[i].pi = 0;
		mrioc->req_qinfo[i].num_requests = 0;
		mrioc->req_qinfo[i].qid = 0;
		mrioc->req_qinfo[i].reply_qid = 0;
		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
		mpi3mr_memset_op_req_q_buffers(mrioc, i);
	}

	/* Reset IO-throttling accounting */
	atomic_set(&mrioc->pend_large_data_sz, 0);
	if (mrioc->throttle_groups) {
		tg = mrioc->throttle_groups;
		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
			tg->id = 0;
			tg->fw_qd = 0;
			tg->modified_qd = 0;
			tg->io_divert = 0;
			tg->need_qd_reduction = 0;
			tg->high = 0;
			tg->low = 0;
			tg->qd_reduction = 0;
			atomic_set(&tg->pend_large_data_sz, 0);
		}
	}
}
4729 
4730 /**
4731  * mpi3mr_free_mem - Free memory allocated for a controller
4732  * @mrioc: Adapter instance reference
4733  *
4734  * Free all the memory allocated for a controller.
4735  *
4736  * Return: Nothing.
4737  */
mpi3mr_free_mem(struct mpi3mr_ioc * mrioc)4738 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
4739 {
4740 	u16 i, j;
4741 	struct mpi3mr_intr_info *intr_info;
4742 	struct diag_buffer_desc *diag_buffer;
4743 
4744 	mpi3mr_free_enclosure_list(mrioc);
4745 	mpi3mr_free_ioctl_dma_memory(mrioc);
4746 
4747 	if (mrioc->sense_buf_pool) {
4748 		if (mrioc->sense_buf)
4749 			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
4750 			    mrioc->sense_buf_dma);
4751 		dma_pool_destroy(mrioc->sense_buf_pool);
4752 		mrioc->sense_buf = NULL;
4753 		mrioc->sense_buf_pool = NULL;
4754 	}
4755 	if (mrioc->sense_buf_q_pool) {
4756 		if (mrioc->sense_buf_q)
4757 			dma_pool_free(mrioc->sense_buf_q_pool,
4758 			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
4759 		dma_pool_destroy(mrioc->sense_buf_q_pool);
4760 		mrioc->sense_buf_q = NULL;
4761 		mrioc->sense_buf_q_pool = NULL;
4762 	}
4763 
4764 	if (mrioc->reply_buf_pool) {
4765 		if (mrioc->reply_buf)
4766 			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
4767 			    mrioc->reply_buf_dma);
4768 		dma_pool_destroy(mrioc->reply_buf_pool);
4769 		mrioc->reply_buf = NULL;
4770 		mrioc->reply_buf_pool = NULL;
4771 	}
4772 	if (mrioc->reply_free_q_pool) {
4773 		if (mrioc->reply_free_q)
4774 			dma_pool_free(mrioc->reply_free_q_pool,
4775 			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
4776 		dma_pool_destroy(mrioc->reply_free_q_pool);
4777 		mrioc->reply_free_q = NULL;
4778 		mrioc->reply_free_q_pool = NULL;
4779 	}
4780 
4781 	for (i = 0; i < mrioc->num_op_req_q; i++)
4782 		mpi3mr_free_op_req_q_segments(mrioc, i);
4783 
4784 	for (i = 0; i < mrioc->num_op_reply_q; i++)
4785 		mpi3mr_free_op_reply_q_segments(mrioc, i);
4786 
4787 	for (i = 0; i < mrioc->intr_info_count; i++) {
4788 		intr_info = mrioc->intr_info + i;
4789 		intr_info->op_reply_q = NULL;
4790 	}
4791 
4792 	kfree(mrioc->req_qinfo);
4793 	mrioc->req_qinfo = NULL;
4794 	mrioc->num_op_req_q = 0;
4795 
4796 	kfree(mrioc->op_reply_qinfo);
4797 	mrioc->op_reply_qinfo = NULL;
4798 	mrioc->num_op_reply_q = 0;
4799 
4800 	kfree(mrioc->init_cmds.reply);
4801 	mrioc->init_cmds.reply = NULL;
4802 
4803 	kfree(mrioc->bsg_cmds.reply);
4804 	mrioc->bsg_cmds.reply = NULL;
4805 
4806 	kfree(mrioc->host_tm_cmds.reply);
4807 	mrioc->host_tm_cmds.reply = NULL;
4808 
4809 	kfree(mrioc->pel_cmds.reply);
4810 	mrioc->pel_cmds.reply = NULL;
4811 
4812 	kfree(mrioc->pel_abort_cmd.reply);
4813 	mrioc->pel_abort_cmd.reply = NULL;
4814 
4815 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4816 		kfree(mrioc->evtack_cmds[i].reply);
4817 		mrioc->evtack_cmds[i].reply = NULL;
4818 	}
4819 
4820 	bitmap_free(mrioc->removepend_bitmap);
4821 	mrioc->removepend_bitmap = NULL;
4822 
4823 	bitmap_free(mrioc->devrem_bitmap);
4824 	mrioc->devrem_bitmap = NULL;
4825 
4826 	bitmap_free(mrioc->evtack_cmds_bitmap);
4827 	mrioc->evtack_cmds_bitmap = NULL;
4828 
4829 	bitmap_free(mrioc->chain_bitmap);
4830 	mrioc->chain_bitmap = NULL;
4831 
4832 	kfree(mrioc->transport_cmds.reply);
4833 	mrioc->transport_cmds.reply = NULL;
4834 
4835 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4836 		kfree(mrioc->dev_rmhs_cmds[i].reply);
4837 		mrioc->dev_rmhs_cmds[i].reply = NULL;
4838 	}
4839 
4840 	if (mrioc->chain_buf_pool) {
4841 		for (i = 0; i < mrioc->chain_buf_count; i++) {
4842 			if (mrioc->chain_sgl_list[i].addr) {
4843 				dma_pool_free(mrioc->chain_buf_pool,
4844 				    mrioc->chain_sgl_list[i].addr,
4845 				    mrioc->chain_sgl_list[i].dma_addr);
4846 				mrioc->chain_sgl_list[i].addr = NULL;
4847 			}
4848 		}
4849 		dma_pool_destroy(mrioc->chain_buf_pool);
4850 		mrioc->chain_buf_pool = NULL;
4851 	}
4852 
4853 	kfree(mrioc->chain_sgl_list);
4854 	mrioc->chain_sgl_list = NULL;
4855 
4856 	if (mrioc->admin_reply_base) {
4857 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
4858 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
4859 		mrioc->admin_reply_base = NULL;
4860 	}
4861 	if (mrioc->admin_req_base) {
4862 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
4863 		    mrioc->admin_req_base, mrioc->admin_req_dma);
4864 		mrioc->admin_req_base = NULL;
4865 	}
4866 
4867 	if (mrioc->pel_seqnum_virt) {
4868 		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
4869 		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
4870 		mrioc->pel_seqnum_virt = NULL;
4871 	}
4872 
4873 	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
4874 		diag_buffer = &mrioc->diag_buffers[i];
4875 		if ((i == 0) && mrioc->seg_tb_support) {
4876 			if (mrioc->trace_buf_pool) {
4877 				for (j = 0; j < mrioc->num_tb_segs; j++) {
4878 					if (mrioc->trace_buf[j].segment) {
4879 						dma_pool_free(mrioc->trace_buf_pool,
4880 						    mrioc->trace_buf[j].segment,
4881 						    mrioc->trace_buf[j].segment_dma);
4882 						mrioc->trace_buf[j].segment = NULL;
4883 					}
4884 
4885 					mrioc->trace_buf[j].segment = NULL;
4886 				}
4887 				dma_pool_destroy(mrioc->trace_buf_pool);
4888 				mrioc->trace_buf_pool = NULL;
4889 			}
4890 
4891 			kfree(mrioc->trace_buf);
4892 			mrioc->trace_buf = NULL;
4893 			diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
4894 		}
4895 		if (diag_buffer->addr) {
4896 			dma_free_coherent(&mrioc->pdev->dev,
4897 			    diag_buffer->size, diag_buffer->addr,
4898 			    diag_buffer->dma_addr);
4899 			diag_buffer->addr = NULL;
4900 			diag_buffer->size = 0;
4901 			diag_buffer->type = 0;
4902 			diag_buffer->status = 0;
4903 		}
4904 	}
4905 
4906 	kfree(mrioc->throttle_groups);
4907 	mrioc->throttle_groups = NULL;
4908 
4909 	kfree(mrioc->logdata_buf);
4910 	mrioc->logdata_buf = NULL;
4911 
4912 }
4913 
4914 /**
4915  * mpi3mr_issue_ioc_shutdown - shutdown controller
4916  * @mrioc: Adapter instance reference
4917  *
 * Send shutdown notification to the controller and wait for the
4919  * shutdown_timeout for it to be completed.
4920  *
4921  * Return: Nothing.
4922  */
mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc * mrioc)4923 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
4924 {
4925 	u32 ioc_config, ioc_status;
4926 	u8 retval = 1;
4927 	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
4928 
4929 	ioc_info(mrioc, "Issuing shutdown Notification\n");
4930 	if (mrioc->unrecoverable) {
4931 		ioc_warn(mrioc,
4932 		    "IOC is unrecoverable shutdown is not issued\n");
4933 		return;
4934 	}
4935 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4936 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4937 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
4938 		ioc_info(mrioc, "shutdown already in progress\n");
4939 		return;
4940 	}
4941 
4942 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4943 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
4944 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
4945 
4946 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
4947 
4948 	if (mrioc->facts.shutdown_timeout)
4949 		timeout = mrioc->facts.shutdown_timeout * 10;
4950 
4951 	do {
4952 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4953 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4954 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
4955 			retval = 0;
4956 			break;
4957 		}
4958 		msleep(100);
4959 	} while (--timeout);
4960 
4961 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4962 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
4963 
4964 	if (retval) {
4965 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
4966 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
4967 			ioc_warn(mrioc,
4968 			    "shutdown still in progress after timeout\n");
4969 	}
4970 
4971 	ioc_info(mrioc,
4972 	    "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
4973 	    (!retval) ? "successful" : "failed", ioc_status,
4974 	    ioc_config);
4975 }
4976 
4977 /**
4978  * mpi3mr_cleanup_ioc - Cleanup controller
4979  * @mrioc: Adapter instance reference
4980  *
4981  * controller cleanup handler, Message unit reset or soft reset
4982  * and shutdown notification is issued to the controller.
4983  *
4984  * Return: Nothing.
4985  */
mpi3mr_cleanup_ioc(struct mpi3mr_ioc * mrioc)4986 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
4987 {
4988 	enum mpi3mr_iocstate ioc_state;
4989 
4990 	dprint_exit(mrioc, "cleaning up the controller\n");
4991 	mpi3mr_ioc_disable_intr(mrioc);
4992 
4993 	ioc_state = mpi3mr_get_iocstate(mrioc);
4994 
4995 	if (!mrioc->unrecoverable && !mrioc->reset_in_progress &&
4996 	    !mrioc->pci_err_recovery &&
4997 	    (ioc_state == MRIOC_STATE_READY)) {
4998 		if (mpi3mr_issue_and_process_mur(mrioc,
4999 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
5000 			mpi3mr_issue_reset(mrioc,
5001 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
5002 			    MPI3MR_RESET_FROM_MUR_FAILURE);
5003 		mpi3mr_issue_ioc_shutdown(mrioc);
5004 	}
5005 	dprint_exit(mrioc, "controller cleanup completed\n");
5006 }
5007 
5008 /**
 * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
 * @mrioc: Adapter instance reference
 * @cmdptr: Internal command tracker
 *
 * Complete an internal driver command with state indicating it
5014  * is completed due to reset.
5015  *
5016  * Return: Nothing.
5017  */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * cmdptr)5018 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
5019 	struct mpi3mr_drv_cmd *cmdptr)
5020 {
5021 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5022 		cmdptr->state |= MPI3MR_CMD_RESET;
5023 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5024 		if (cmdptr->is_waiting) {
5025 			complete(&cmdptr->done);
5026 			cmdptr->is_waiting = 0;
5027 		} else if (cmdptr->callback)
5028 			cmdptr->callback(mrioc, cmdptr);
5029 	}
5030 }
5031 
5032 /**
 * mpi3mr_flush_drv_cmds - Flush internal driver commands
5034  * @mrioc: Adapter instance reference
5035  *
5036  * Flush all internal driver commands post reset
5037  *
5038  * Return: Nothing.
5039  */
mpi3mr_flush_drv_cmds(struct mpi3mr_ioc * mrioc)5040 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
5041 {
5042 	struct mpi3mr_drv_cmd *cmdptr;
5043 	u8 i;
5044 
5045 	cmdptr = &mrioc->init_cmds;
5046 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5047 
5048 	cmdptr = &mrioc->cfg_cmds;
5049 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5050 
5051 	cmdptr = &mrioc->bsg_cmds;
5052 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5053 	cmdptr = &mrioc->host_tm_cmds;
5054 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5055 
5056 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5057 		cmdptr = &mrioc->dev_rmhs_cmds[i];
5058 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5059 	}
5060 
5061 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5062 		cmdptr = &mrioc->evtack_cmds[i];
5063 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5064 	}
5065 
5066 	cmdptr = &mrioc->pel_cmds;
5067 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5068 
5069 	cmdptr = &mrioc->pel_abort_cmd;
5070 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5071 
5072 	cmdptr = &mrioc->transport_cmds;
5073 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5074 }
5075 
5076 /**
5077  * mpi3mr_pel_wait_post - Issue PEL Wait
5078  * @mrioc: Adapter instance reference
5079  * @drv_cmd: Internal command tracker
5080  *
5081  * Issue PEL Wait MPI request through admin queue and return.
5082  *
5083  * Return: Nothing.
5084  */
mpi3mr_pel_wait_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5085 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
5086 	struct mpi3mr_drv_cmd *drv_cmd)
5087 {
5088 	struct mpi3_pel_req_action_wait pel_wait;
5089 
5090 	mrioc->pel_abort_requested = false;
5091 
5092 	memset(&pel_wait, 0, sizeof(pel_wait));
5093 	drv_cmd->state = MPI3MR_CMD_PENDING;
5094 	drv_cmd->is_waiting = 0;
5095 	drv_cmd->callback = mpi3mr_pel_wait_complete;
5096 	drv_cmd->ioc_status = 0;
5097 	drv_cmd->ioc_loginfo = 0;
5098 	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5099 	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5100 	pel_wait.action = MPI3_PEL_ACTION_WAIT;
5101 	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
5102 	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
5103 	pel_wait.class = cpu_to_le16(mrioc->pel_class);
5104 	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
5105 	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
5106 	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
5107 
5108 	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
5109 		dprint_bsg_err(mrioc,
5110 			    "Issuing PELWait: Admin post failed\n");
5111 		drv_cmd->state = MPI3MR_CMD_NOTUSED;
5112 		drv_cmd->callback = NULL;
5113 		drv_cmd->retry_count = 0;
5114 		mrioc->pel_enabled = false;
5115 	}
5116 }
5117 
5118 /**
5119  * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
5120  * @mrioc: Adapter instance reference
5121  * @drv_cmd: Internal command tracker
5122  *
5123  * Issue PEL get sequence number MPI request through admin queue
5124  * and return.
5125  *
5126  * Return: 0 on success, non-zero on failure.
5127  */
mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5128 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
5129 	struct mpi3mr_drv_cmd *drv_cmd)
5130 {
5131 	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
5132 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5133 	int retval = 0;
5134 
5135 	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
5136 	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
5137 	mrioc->pel_cmds.is_waiting = 0;
5138 	mrioc->pel_cmds.ioc_status = 0;
5139 	mrioc->pel_cmds.ioc_loginfo = 0;
5140 	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
5141 	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5142 	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5143 	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
5144 	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
5145 	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
5146 
5147 	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
5148 			sizeof(pel_getseq_req), 0);
5149 	if (retval) {
5150 		if (drv_cmd) {
5151 			drv_cmd->state = MPI3MR_CMD_NOTUSED;
5152 			drv_cmd->callback = NULL;
5153 			drv_cmd->retry_count = 0;
5154 		}
5155 		mrioc->pel_enabled = false;
5156 	}
5157 
5158 	return retval;
5159 }
5160 
5161 /**
5162  * mpi3mr_pel_wait_complete - PELWait Completion callback
5163  * @mrioc: Adapter instance reference
5164  * @drv_cmd: Internal command tracker
5165  *
5166  * This is a callback handler for the PELWait request and
5167  * firmware completes a PELWait request when it is aborted or a
5168  * new PEL entry is available. This sends AEN to the application
5169  * and if the PELwait completion is not due to PELAbort then
5170  * this will send a request for new PEL Sequence number
5171  *
5172  * Return: Nothing.
5173  */
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	u16 ioc_status, pe_log_status;
	bool do_retry = false;

	/* Completion caused by a controller reset: just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
			__func__, ioc_status, drv_cmd->ioc_loginfo);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;

	/* No reply frame means PEL processing cannot continue */
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to no reply\n");
		goto out_failed;
	}

	/* ABORTED is an expected completion (host requested a PEL abort) */
	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
			__func__, pe_log_status);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
		    pe_log_status);
		do_retry = true;
	}

	/* Re-post the wait up to MPI3MR_PEL_RETRY_COUNT times on failure */
	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_wait_post(mrioc, drv_cmd);
			return;
		}
		dprint_bsg_err(mrioc,
		    "pel_wait: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Notify applications that a new event is available */
	atomic64_inc(&event_counter);
	if (!mrioc->pel_abort_requested) {
		/* Fetch the new sequence number and re-arm the wait */
		mrioc->pel_cmds.retry_count = 0;
		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
	}

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}
5241 
5242 /**
5243  * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
5244  * @mrioc: Adapter instance reference
5245  * @drv_cmd: Internal command tracker
5246  *
5247  * This is a callback handler for the PEL get sequence number
5248  * request and a new PEL wait request will be issued to the
5249  * firmware from this
5250  *
5251  * Return: Nothing.
5252  */
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	struct mpi3_pel_seq *pel_seqnum_virt;
	u16 ioc_status;
	bool do_retry = false;

	/* DMA buffer the firmware filled with the sequence numbers */
	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;

	/* Completion caused by a controller reset: just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
	/* No reply frame means PEL processing cannot continue */
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to no reply\n");
		goto out_failed;
	}

	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
		    le16_to_cpu(pel_reply->pe_log_status));
		do_retry = true;
	}

	/* Re-post the request up to MPI3MR_PEL_RETRY_COUNT times on failure */
	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc,
			    "pel_get_seqnum: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
			return;
		}

		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Next PEL wait starts just past the newest logged entry */
	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
	drv_cmd->retry_count = 0;
	mpi3mr_pel_wait_post(mrioc, drv_cmd);

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}
5316 
5317 /**
 * mpi3mr_check_op_admin_proc - wait for reply queue processing to quiesce
5319  * @mrioc: Adapter instance reference
5320  *
5321  * Check if any of the operation reply queues
5322  * or the admin reply queue are currently in use.
5323  * If any queue is in use, this function waits for
5324  * a maximum of 10 seconds for them to become available.
5325  *
5326  * Return: 0 on success, non-zero on failure.
5327  */
mpi3mr_check_op_admin_proc(struct mpi3mr_ioc * mrioc)5328 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
5329 {
5330 
5331 	u16 timeout = 10 * 10;
5332 	u16 elapsed_time = 0;
5333 	bool op_admin_in_use = false;
5334 
5335 	do {
5336 		op_admin_in_use = false;
5337 
5338 		/* Check admin_reply queue first to exit early */
5339 		if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
5340 			op_admin_in_use = true;
5341 		else {
5342 			/* Check op_reply queues */
5343 			int i;
5344 
5345 			for (i = 0; i < mrioc->num_queues; i++) {
5346 				if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
5347 					op_admin_in_use = true;
5348 					break;
5349 				}
5350 			}
5351 		}
5352 
5353 		if (!op_admin_in_use)
5354 			break;
5355 
5356 		msleep(100);
5357 
5358 	} while (++elapsed_time < timeout);
5359 
5360 	if (op_admin_in_use)
5361 		return 1;
5362 
5363 	return 0;
5364 }
5365 
5366 /**
5367  * mpi3mr_soft_reset_handler - Reset the controller
5368  * @mrioc: Adapter instance reference
5369  * @reset_reason: Reset reason code
5370  * @snapdump: Flag to generate snapdump in firmware or not
5371  *
 * This is a handler for recovering the controller by issuing a soft
 * reset or a diag fault reset. This is a blocking function; while one
 * reset is executing, any further reset requests are blocked. All BSG
 * requests are blocked during the reset. If the controller reset is
 * successful then the controller is reinitialized, otherwise the
 * controller is marked as not recoverable.
 *
 * If the snapdump bit is set, the controller is issued a diag fault
 * reset so that the firmware can create a snap dump; after that the
 * firmware will raise an F000 fault and the driver will issue a soft
 * reset to recover from it.
5384  *
5385  * Return: 0 on success, non-zero on failure.
5386  */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u16 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	union mpi3mr_trigger_data trigger_data;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		/*
		 * Another reset is already running: wait for it to finish
		 * and hand its result back to this caller instead of
		 * starting a second reset.
		 */
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* Publish reset state so BSG traffic and device refresh stop */
	mrioc->device_refresh_on = 0;
	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;
	mrioc->prev_reset_result = -1;
	memset(&trigger_data, 0, sizeof(trigger_data));

	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		/*
		 * Host-initiated reset with no snapdump requested: release
		 * the host diag buffers and mask all event notifications
		 * before taking the controller down.
		 */
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
		dprint_reset(mrioc,
		    "soft_reset_handler: releasing host diagnostic buffers\n");
		mpi3mr_release_diag_bufs(mrioc, 0);
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);
	mrioc->io_admin_reset_sync = 1;

	if (snapdump) {
		/*
		 * Diag-fault the controller so firmware captures a snap
		 * dump, then poll the host diagnostic register until the
		 * save-in-progress bit clears (or the save timeout expires).
		 */
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
				      MPI3_SYSIF_FAULT_CODE_MASK);
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
			mpi3mr_set_trigger_data_in_all_hdb(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	retval = mpi3mr_check_op_admin_proc(mrioc);
	if (retval) {
		ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
				"thread still processing replies even after a 10 second\n"
				"timeout. Marking the controller as unrecoverable!\n");

		goto out;
	}

	/* Post-reset facts must agree with the pre-reset configuration */
	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush every outstanding command/event and reset driver state */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
	bitmap_clear(mrioc->removepend_bitmap, 0,
		     mrioc->dev_handle_bitmap_bits);
	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);

	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	mpi3mr_release_diag_bufs(mrioc, 1);
	mrioc->fw_release_trigger_active = false;
	mrioc->trace_release_trigger_active = false;
	mrioc->snapdump_trigger_active = false;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);

	dprint_reset(mrioc,
	    "soft_reset_handler: reinitializing the controller\n");
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);

out:
	if (!retval) {
		/* Success: clear reset state, restart PEL and the watchdog */
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		mrioc->pel_abort_requested = 0;
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mrioc->device_refresh_on = 0;

		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Failure: diag-fault the IOC and mark it unrecoverable */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->device_refresh_on = 0;
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		mrioc->stop_bsgs = 0;
		retval = -1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}
	/* Cache the result for any resets that were blocked on the mutex */
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}
5565 
5566 /**
5567  * mpi3mr_post_cfg_req - Issue config requests and wait
5568  * @mrioc: Adapter instance reference
5569  * @cfg_req: Configuration request
5570  * @timeout: Timeout in seconds
5571  * @ioc_status: Pointer to return ioc status
5572  *
5573  * A generic function for posting MPI3 configuration request to
5574  * the firmware. This blocks for the completion of request for
5575  * timeout seconds and if the request times out this function
5576  * faults the controller with proper reason code.
5577  *
5578  * On successful completion of the request this function returns
5579  * appropriate ioc status from the firmware back to the caller.
5580  *
5581  * Return: 0 on success, non-zero on failure.
5582  */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/* Only one config request may be outstanding; the cfg_cmds mutex
	 * serializes callers and the PENDING flag guards the shared tracker.
	 */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	/* Prepare the shared command tracker for a fresh blocking request */
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller with the timeout reason */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	/* Hand the firmware's ioc_status back to the caller */
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}
5635 
5636 /**
5637  * mpi3mr_process_cfg_req - config page request processor
5638  * @mrioc: Adapter instance reference
5639  * @cfg_req: Configuration request
5640  * @cfg_hdr: Configuration page header
5641  * @timeout: Timeout in seconds
5642  * @ioc_status: Pointer to return ioc status
5643  * @cfg_buf: Memory pointer to copy config page or header
5644  * @cfg_buf_sz: Size of the memory to get config page or header
5645  *
5646  * This is handler for config page read, write and config page
5647  * header read operations.
5648  *
5649  * This function expects the cfg_req to be populated with page
5650  * type, page number, action for the header read and with page
5651  * address for all other operations.
5652  *
 * The cfg_hdr can be passed as null for reading the required header
 * details; for read/write pages the cfg_hdr should point to a valid
 * configuration page header.
5656  *
5657  * This allocates dmaable memory based on the size of the config
5658  * buffer and set the SGE of the cfg_req.
5659  *
5660  * For write actions, the config page data has to be passed in
5661  * the cfg_buf and size of the data has to be mentioned in the
5662  * cfg_buf_sz.
5663  *
5664  * For read/header actions, on successful completion of the
5665  * request with successful ioc_status the data will be copied
5666  * into the cfg_buf limited to a minimum of actual page size and
5667  * cfg_buf_sz
5668  *
5669  *
5670  * Return: 0 on success, non-zero on failure.
5671  */
static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req,
	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
	void *cfg_buf, u32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc;
	int retval = -1;
	u8 invalid_action = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));

	/*
	 * Header reads have a fixed size; every other action takes its size
	 * from the previously fetched page header and therefore requires a
	 * valid cfg_hdr.
	 */
	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
		mem_desc.size = sizeof(struct mpi3_config_page_header);
	else {
		if (!cfg_hdr) {
			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number);
			goto out;
		}
		/* Reject actions the page's attribute does not permit */
		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
			if (cfg_req->action
			    != MPI3_CONFIG_ACTION_READ_CURRENT)
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
			if ((cfg_req->action ==
			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
			    (cfg_req->action ==
			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
		default:
			break;
		}
		if (invalid_action) {
			ioc_err(mrioc,
			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number, cfg_hdr->page_attribute);
			goto out;
		}
		/* page_length is in 4-byte (DWORD) units */
		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
		cfg_req->page_length = cfg_hdr->page_length;
		cfg_req->page_version = cfg_hdr->page_version;
	}

	/* DMA-able bounce buffer the firmware reads from / writes into */
	mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev,
		mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL);

	if (!mem_desc.addr)
		return retval;

	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
	    mem_desc.dma_addr);

	/* For write actions, stage the caller's page data into the buffer */
	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer to be written\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
		goto out;

	retval = 0;
	/*
	 * For read/header actions that succeeded, copy the returned page
	 * back to the caller, bounded by both the page and buffer sizes.
	 */
	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer read\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

out:
	if (mem_desc.addr) {
		dma_free_coherent(&mrioc->pdev->dev, mem_desc.size,
			mem_desc.addr, mem_desc.dma_addr);
		mem_desc.addr = NULL;
	}

	return retval;
}
5763 
5764 /**
5765  * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5766  * @mrioc: Adapter instance reference
5767  * @ioc_status: Pointer to return ioc status
5768  * @dev_pg0: Pointer to return device page 0
5769  * @pg_sz: Size of the memory allocated to the page pointer
5770  * @form: The form to be used for addressing the page
5771  * @form_spec: Form specific information like device handle
5772  *
5773  * This is handler for config page read for a specific device
5774  * page0. The ioc_status has the controller returned ioc_status.
5775  * This routine doesn't check ioc_status to decide whether the
5776  * page read is success or not and it is the callers
5777  * responsibility.
5778  *
5779  * Return: 0 on success, non-zero on failure.
5780  */
int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(dev_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn the page length/version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "device page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page for the addressed device. */
	pgad = (form & MPI3_DEVICE_PGAD_FORM_MASK) |
	    (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK);
	cfg_req.page_address = cpu_to_le32(pgad);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
		ioc_err(mrioc, "device page0 read failed\n");
		return -1;
	}
	return 0;
}
5821 
5822 
5823 /**
5824  * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
5825  * @mrioc: Adapter instance reference
5826  * @ioc_status: Pointer to return ioc status
5827  * @phy_pg0: Pointer to return SAS Phy page 0
5828  * @pg_sz: Size of the memory allocated to the page pointer
5829  * @form: The form to be used for addressing the page
5830  * @form_spec: Form specific information like phy number
5831  *
5832  * This is handler for config page read for a specific SAS Phy
5833  * page0. The ioc_status has the controller returned ioc_status.
5834  * This routine doesn't check ioc_status to decide whether the
5835  * page read is success or not and it is the callers
5836  * responsibility.
5837  *
5838  * Return: 0 on success, non-zero on failure.
5839  */
int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(phy_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn the page length/version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas phy page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page for the addressed phy. */
	pgad = (form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK);
	cfg_req.page_address = cpu_to_le32(pgad);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
		ioc_err(mrioc, "sas phy page0 read failed\n");
		return -1;
	}
	return 0;
}
5881 
5882 /**
5883  * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
5884  * @mrioc: Adapter instance reference
5885  * @ioc_status: Pointer to return ioc status
5886  * @phy_pg1: Pointer to return SAS Phy page 1
5887  * @pg_sz: Size of the memory allocated to the page pointer
5888  * @form: The form to be used for addressing the page
5889  * @form_spec: Form specific information like phy number
5890  *
5891  * This is handler for config page read for a specific SAS Phy
5892  * page1. The ioc_status has the controller returned ioc_status.
5893  * This routine doesn't check ioc_status to decide whether the
5894  * page read is success or not and it is the callers
5895  * responsibility.
5896  *
5897  * Return: 0 on success, non-zero on failure.
5898  */
int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(phy_pg1, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn the page length/version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas phy page1 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page for the addressed phy. */
	pgad = (form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK);
	cfg_req.page_address = cpu_to_le32(pgad);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
		ioc_err(mrioc, "sas phy page1 read failed\n");
		return -1;
	}
	return 0;
}
5940 
5941 
5942 /**
5943  * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
5944  * @mrioc: Adapter instance reference
5945  * @ioc_status: Pointer to return ioc status
5946  * @exp_pg0: Pointer to return SAS Expander page 0
5947  * @pg_sz: Size of the memory allocated to the page pointer
5948  * @form: The form to be used for addressing the page
5949  * @form_spec: Form specific information like device handle
5950  *
5951  * This is handler for config page read for a specific SAS
5952  * Expander page0. The ioc_status has the controller returned
5953  * ioc_status. This routine doesn't check ioc_status to decide
5954  * whether the page read is success or not and it is the callers
5955  * responsibility.
5956  *
5957  * Return: 0 on success, non-zero on failure.
5958  */
int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(exp_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn the page length/version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "expander page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page for the addressed expander. */
	pgad = (form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK));
	cfg_req.page_address = cpu_to_le32(pgad);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
		ioc_err(mrioc, "expander page0 read failed\n");
		return -1;
	}
	return 0;
}
6001 
6002 /**
6003  * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
6004  * @mrioc: Adapter instance reference
6005  * @ioc_status: Pointer to return ioc status
6006  * @exp_pg1: Pointer to return SAS Expander page 1
6007  * @pg_sz: Size of the memory allocated to the page pointer
6008  * @form: The form to be used for addressing the page
6009  * @form_spec: Form specific information like phy number
6010  *
6011  * This is handler for config page read for a specific SAS
6012  * Expander page1. The ioc_status has the controller returned
6013  * ioc_status. This routine doesn't check ioc_status to decide
6014  * whether the page read is success or not and it is the callers
6015  * responsibility.
6016  *
6017  * Return: 0 on success, non-zero on failure.
6018  */
int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(exp_pg1, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn the page length/version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "expander page1 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page for the addressed expander phy. */
	pgad = (form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK));
	cfg_req.page_address = cpu_to_le32(pgad);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
		ioc_err(mrioc, "expander page1 read failed\n");
		return -1;
	}
	return 0;
}
6061 
6062 /**
6063  * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
6064  * @mrioc: Adapter instance reference
6065  * @ioc_status: Pointer to return ioc status
6066  * @encl_pg0: Pointer to return Enclosure page 0
6067  * @pg_sz: Size of the memory allocated to the page pointer
6068  * @form: The form to be used for addressing the page
6069  * @form_spec: Form specific information like device handle
6070  *
6071  * This is handler for config page read for a specific Enclosure
6072  * page0. The ioc_status has the controller returned ioc_status.
6073  * This routine doesn't check ioc_status to decide whether the
6074  * page read is success or not and it is the callers
6075  * responsibility.
6076  *
6077  * Return: 0 on success, non-zero on failure.
6078  */
int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(encl_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn the page length/version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "enclosure page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page for the addressed enclosure. */
	pgad = (form & MPI3_ENCLOS_PGAD_FORM_MASK) |
	    (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK);
	cfg_req.page_address = cpu_to_le32(pgad);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
		ioc_err(mrioc, "enclosure page0 read failed\n");
		return -1;
	}
	return 0;
}
6120 
6121 
6122 /**
6123  * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
6124  * @mrioc: Adapter instance reference
6125  * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
6126  * @pg_sz: Size of the memory allocated to the page pointer
6127  *
6128  * This is handler for config page read for the SAS IO Unit
6129  * page0. This routine checks ioc_status to decide whether the
6130  * page read is success or not.
6131  *
6132  * Return: 0 on success, non-zero on failure.
6133  */
int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
	struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u16 ioc_status = 0;

	memset(sas_io_unit_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn the page length/version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas io unit page0 header read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 2: read the current page; ioc_status is checked locally. */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
		ioc_err(mrioc, "sas io unit page0 read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}
	return 0;
}
6177 
6178 /**
6179  * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
6180  * @mrioc: Adapter instance reference
6181  * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
6182  * @pg_sz: Size of the memory allocated to the page pointer
6183  *
6184  * This is handler for config page read for the SAS IO Unit
6185  * page1. This routine checks ioc_status to decide whether the
6186  * page read is success or not.
6187  *
6188  * Return: 0 on success, non-zero on failure.
6189  */
int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u16 ioc_status = 0;

	memset(sas_io_unit_pg1, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn the page length/version. */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas io unit page1 header read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 2: read the current page; ioc_status is checked locally. */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
		ioc_err(mrioc, "sas io unit page1 read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}
	return 0;
}
6233 
6234 /**
6235  * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
6236  * @mrioc: Adapter instance reference
6237  * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
6238  * @pg_sz: Size of the memory allocated to the page pointer
6239  *
6240  * This is handler for config page write for the SAS IO Unit
6241  * page1. This routine checks ioc_status to decide whether the
6242  * page read is success or not. This will modify both current
6243  * and persistent page.
6244  *
6245  * Return: 0 on success, non-zero on failure.
6246  */
mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page1 * sas_io_unit_pg1,u16 pg_sz)6247 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6248 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6249 {
6250 	struct mpi3_config_page_header cfg_hdr;
6251 	struct mpi3_config_request cfg_req;
6252 	u16 ioc_status = 0;
6253 
6254 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6255 	memset(&cfg_req, 0, sizeof(cfg_req));
6256 
6257 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6258 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6259 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6260 	cfg_req.page_number = 1;
6261 	cfg_req.page_address = 0;
6262 
6263 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6264 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6265 		ioc_err(mrioc, "sas io unit page1 header read failed\n");
6266 		goto out_failed;
6267 	}
6268 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6269 		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6270 		    ioc_status);
6271 		goto out_failed;
6272 	}
6273 	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
6274 
6275 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6276 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6277 		ioc_err(mrioc, "sas io unit page1 write current failed\n");
6278 		goto out_failed;
6279 	}
6280 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6281 		ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
6282 		    ioc_status);
6283 		goto out_failed;
6284 	}
6285 
6286 	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
6287 
6288 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6289 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6290 		ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
6291 		goto out_failed;
6292 	}
6293 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6294 		ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
6295 		    ioc_status);
6296 		goto out_failed;
6297 	}
6298 	return 0;
6299 out_failed:
6300 	return -1;
6301 }
6302 
6303 /**
6304  * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
6305  * @mrioc: Adapter instance reference
6306  * @driver_pg1: Pointer to return Driver page 1
6307  * @pg_sz: Size of the memory allocated to the page pointer
6308  *
6309  * This is handler for config page read for the Driver page1.
6310  * This routine checks ioc_status to decide whether the page
6311  * read is success or not.
6312  *
6313  * Return: 0 on success, non-zero on failure.
6314  */
mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_driver_page1 * driver_pg1,u16 pg_sz)6315 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
6316 	struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
6317 {
6318 	struct mpi3_config_page_header cfg_hdr;
6319 	struct mpi3_config_request cfg_req;
6320 	u16 ioc_status = 0;
6321 
6322 	memset(driver_pg1, 0, pg_sz);
6323 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6324 	memset(&cfg_req, 0, sizeof(cfg_req));
6325 
6326 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6327 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6328 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6329 	cfg_req.page_number = 1;
6330 	cfg_req.page_address = 0;
6331 
6332 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6333 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6334 		ioc_err(mrioc, "driver page1 header read failed\n");
6335 		goto out_failed;
6336 	}
6337 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6338 		ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
6339 		    ioc_status);
6340 		goto out_failed;
6341 	}
6342 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6343 
6344 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6345 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
6346 		ioc_err(mrioc, "driver page1 read failed\n");
6347 		goto out_failed;
6348 	}
6349 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6350 		ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
6351 		    ioc_status);
6352 		goto out_failed;
6353 	}
6354 	return 0;
6355 out_failed:
6356 	return -1;
6357 }
6358 
6359 /**
6360  * mpi3mr_cfg_get_driver_pg2 - Read current driver page2
6361  * @mrioc: Adapter instance reference
6362  * @driver_pg2: Pointer to return driver page 2
6363  * @pg_sz: Size of the memory allocated to the page pointer
6364  * @page_action: Page action
6365  *
6366  * This is handler for config page read for the driver page2.
6367  * This routine checks ioc_status to decide whether the page
6368  * read is success or not.
6369  *
6370  * Return: 0 on success, non-zero on failure.
6371  */
mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc * mrioc,struct mpi3_driver_page2 * driver_pg2,u16 pg_sz,u8 page_action)6372 int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
6373 	struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
6374 {
6375 	struct mpi3_config_page_header cfg_hdr;
6376 	struct mpi3_config_request cfg_req;
6377 	u16 ioc_status = 0;
6378 
6379 	memset(driver_pg2, 0, pg_sz);
6380 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6381 	memset(&cfg_req, 0, sizeof(cfg_req));
6382 
6383 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6384 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6385 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6386 	cfg_req.page_number = 2;
6387 	cfg_req.page_address = 0;
6388 	cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;
6389 
6390 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6391 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6392 		ioc_err(mrioc, "driver page2 header read failed\n");
6393 		goto out_failed;
6394 	}
6395 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6396 		ioc_err(mrioc, "driver page2 header read failed with\n"
6397 			       "ioc_status(0x%04x)\n",
6398 		    ioc_status);
6399 		goto out_failed;
6400 	}
6401 	cfg_req.action = page_action;
6402 
6403 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6404 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
6405 		ioc_err(mrioc, "driver page2 read failed\n");
6406 		goto out_failed;
6407 	}
6408 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6409 		ioc_err(mrioc, "driver page2 read failed with\n"
6410 			       "ioc_status(0x%04x)\n",
6411 		    ioc_status);
6412 		goto out_failed;
6413 	}
6414 	return 0;
6415 out_failed:
6416 	return -1;
6417 }
6418 
6419