xref: /linux/drivers/scsi/mpt3sas/mpt3sas_scsih.c (revision 0074281bb6316108e0cff094bd4db78ab3eee236)
1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/raid_class.h>
56 #include <linux/unaligned.h>
57 
58 #include "mpt3sas_base.h"
59 
/*
 * Driver-reserved SCSI channel numbers: RAID volumes and NVMe (PCIe)
 * devices are reported on their own channels (see the channel checks in
 * _scsih_determine_boot_device()).
 */
#define RAID_CHANNEL 1

#define PCIE_CHANNEL 2
63 
64 /* forward proto's */
65 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
66 	struct _sas_node *sas_expander);
67 static void _firmware_event_work(struct work_struct *work);
68 
69 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
70 	struct _sas_device *sas_device);
71 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
72 	u8 retry_count, u8 is_pd);
73 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
74 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
75 	struct _pcie_device *pcie_device);
76 static void
77 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
78 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
79 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
80 
/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
/* global ioc lock for list operations */
DEFINE_SPINLOCK(gioc_lock);

MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
MODULE_ALIAS("mpt2sas");

/* local parameters */
/*
 * Callback handler indices; declared u8, so the -1 initializers wrap to
 * 0xFF (presumably meaning "not yet registered" — NOTE(review): confirm
 * against the base driver's callback registration at module init).
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
/* presumably per-generation adapter counters (SAS 2.0 vs SAS 3.0) —
 * NOTE(review): verify usage at probe time
 */
static int mpt2_ids;
static int mpt3_ids;

static u8 tm_tr_cb_idx = -1 ;
static u8 tm_tr_volume_cb_idx = -1 ;
static u8 tm_sas_control_cb_idx = -1;

/* command line options */
static u32 logging_level;
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");


static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");


static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0444);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");

/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
module_param(max_lun, ullong, 0444);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

static ushort hbas_to_enumerate;
module_param(hbas_to_enumerate, ushort, 0444);
MODULE_PARM_DESC(hbas_to_enumerate,
		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
		  1 - enumerates only SAS 2.0 generation HBAs\n \
		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");

/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Either bit can be set, or both
 */
static int diag_buffer_enable = -1;
module_param(diag_buffer_enable, int, 0444);
MODULE_PARM_DESC(diag_buffer_enable,
	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
module_param(disable_discovery, int, 0444);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");


/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");

static bool enable_sdev_max_qd;
module_param(enable_sdev_max_qd, bool, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd,
	"Enable sdev max qd as can_queue, def=disabled(0)");

static int multipath_on_hba = -1;
module_param(multipath_on_hba, int, 0);
MODULE_PARM_DESC(multipath_on_hba,
	"Multipath support to add same target device\n\t\t"
	"as many times as it is visible to HBA from various paths\n\t\t"
	"(by default:\n\t\t"
	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
	"\t SAS 3.5 HBA - This will be enabled)");

static int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable,
	"Shared host tagset enable/disable Default: enable(1)");

/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;
179 
180 
/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};

/*
 * Driver-internal pseudo event codes, carried in fw_event_work->event
 * alongside the firmware's MPI2_EVENT_XXX values; taken from the top of
 * the u16 range, presumably to avoid colliding with firmware-defined
 * events — NOTE(review): confirm against mpi2_ioc.h.
 */
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)

/*
 * SAS Log info code for a NCQ collateral abort after an NCQ error:
 * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
 * See: drivers/message/fusion/lsi/mpi_log_sas.h
 */
#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR	0x31080000
205 
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 * Lifetime is managed by @refcount via fw_event_work_get()/
 * fw_event_work_put(); the last put frees the whole allocation,
 * including the trailing @event_data.
 */
struct fw_event_work {
	struct list_head	list;
	struct work_struct	work;

	struct MPT3SAS_ADAPTER *ioc;
	u16			device_handle;
	u8			VF_ID;
	u8			VP_ID;
	u8			ignore;
	u16			event;
	struct kref		refcount;
	/* flexible array member; 4-byte aligned, presumably for the MPI
	 * event payload layout — NOTE(review): confirm
	 */
	char			event_data[] __aligned(4);
};
234 
/**
 * fw_event_work_free - kref release callback for a fw_event_work
 * @r: the embedded &struct kref of the event being released
 *
 * Invoked by kref_put() when the last reference drops; frees the
 * containing fw_event_work (including its trailing event_data).
 */
static void fw_event_work_free(struct kref *r)
{
	kfree(container_of(r, struct fw_event_work, refcount));
}
239 
/**
 * fw_event_work_get - take an additional reference on a fw_event_work
 * @fw_work: event to reference
 */
static void fw_event_work_get(struct fw_event_work *fw_work)
{
	kref_get(&fw_work->refcount);
}
244 
/**
 * fw_event_work_put - drop a reference on a fw_event_work
 * @fw_work: event to release
 *
 * Frees the event via fw_event_work_free() when this was the last
 * reference.
 */
static void fw_event_work_put(struct fw_event_work *fw_work)
{
	kref_put(&fw_work->refcount, fw_event_work_free);
}
249 
alloc_fw_event_work(int len)250 static struct fw_event_work *alloc_fw_event_work(int len)
251 {
252 	struct fw_event_work *fw_event;
253 
254 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
255 	if (!fw_event)
256 		return NULL;
257 
258 	kref_init(&fw_event->refcount);
259 	return fw_event;
260 }
261 
/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16	handle;
	u8	is_raid;
	enum dma_data_direction dir;
	u32	data_length;
	dma_addr_t data_dma;
	u8	sense[SCSI_SENSE_BUFFERSIZE];
	u32	lun;
	u8	cdb_length;
	u8	cdb[32];
	u8	timeout;
	u8	VF_ID;
	u8	VP_ID;
	u8	valid_reply;
  /* the following bits are only valid when 'valid_reply = 1' */
	u32	sense_length;
	u16	ioc_status;
	u8	scsi_state;
	u8	scsi_status;
	u32	log_info;
	u32	transfer_length;
};
309 
310 /**
311  * _scsih_set_debug_level - global setting of ioc->logging_level.
312  * @val: value of the parameter to be set
313  * @kp: pointer to kernel_param structure
314  *
315  * Note: The logging levels are defined in mpt3sas_debug.h.
316  */
317 static int
_scsih_set_debug_level(const char * val,const struct kernel_param * kp)318 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
319 {
320 	int ret = param_set_int(val, kp);
321 	struct MPT3SAS_ADAPTER *ioc;
322 
323 	if (ret)
324 		return ret;
325 
326 	pr_info("setting logging_level(0x%08x)\n", logging_level);
327 	spin_lock(&gioc_lock);
328 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
329 		ioc->logging_level = logging_level;
330 	spin_unlock(&gioc_lock);
331 	return 0;
332 }
333 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
334 	&logging_level, 0644);
335 
336 /**
337  * _scsih_srch_boot_sas_address - search based on sas_address
338  * @sas_address: sas address
339  * @boot_device: boot device object from bios page 2
340  *
341  * Return: 1 when there's a match, 0 means no match.
342  */
343 static inline int
_scsih_srch_boot_sas_address(u64 sas_address,Mpi2BootDeviceSasWwid_t * boot_device)344 _scsih_srch_boot_sas_address(u64 sas_address,
345 	Mpi2BootDeviceSasWwid_t *boot_device)
346 {
347 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
348 }
349 
350 /**
351  * _scsih_srch_boot_device_name - search based on device name
352  * @device_name: device name specified in INDENTIFY fram
353  * @boot_device: boot device object from bios page 2
354  *
355  * Return: 1 when there's a match, 0 means no match.
356  */
357 static inline int
_scsih_srch_boot_device_name(u64 device_name,Mpi2BootDeviceDeviceName_t * boot_device)358 _scsih_srch_boot_device_name(u64 device_name,
359 	Mpi2BootDeviceDeviceName_t *boot_device)
360 {
361 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
362 }
363 
364 /**
365  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
366  * @enclosure_logical_id: enclosure logical id
367  * @slot_number: slot number
368  * @boot_device: boot device object from bios page 2
369  *
370  * Return: 1 when there's a match, 0 means no match.
371  */
372 static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id,u16 slot_number,Mpi2BootDeviceEnclosureSlot_t * boot_device)373 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
374 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
375 {
376 	return (enclosure_logical_id == le64_to_cpu(boot_device->
377 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
378 	    SlotNumber)) ? 1 : 0;
379 }
380 
381 /**
382  * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
383  *			  port number from port list
384  * @ioc: per adapter object
385  * @port_id: port number
386  * @bypass_dirty_port_flag: when set look the matching hba port entry even
387  *			if hba port entry is marked as dirty.
388  *
389  * Search for hba port entry corresponding to provided port number,
390  * if available return port object otherwise return NULL.
391  */
392 struct hba_port *
mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 bypass_dirty_port_flag)393 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
394 	u8 port_id, u8 bypass_dirty_port_flag)
395 {
396 	struct hba_port *port, *port_next;
397 
398 	/*
399 	 * When multipath_on_hba is disabled then
400 	 * search the hba_port entry using default
401 	 * port id i.e. 255
402 	 */
403 	if (!ioc->multipath_on_hba)
404 		port_id = MULTIPATH_DISABLED_PORT_ID;
405 
406 	list_for_each_entry_safe(port, port_next,
407 	    &ioc->port_table_list, list) {
408 		if (port->port_id != port_id)
409 			continue;
410 		if (bypass_dirty_port_flag)
411 			return port;
412 		if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
413 			continue;
414 		return port;
415 	}
416 
417 	/*
418 	 * Allocate hba_port object for default port id (i.e. 255)
419 	 * when multipath_on_hba is disabled for the HBA.
420 	 * And add this object to port_table_list.
421 	 */
422 	if (!ioc->multipath_on_hba) {
423 		port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
424 		if (!port)
425 			return NULL;
426 
427 		port->port_id = port_id;
428 		ioc_info(ioc,
429 		   "hba_port entry: %p, port: %d is added to hba_port list\n",
430 		   port, port->port_id);
431 		list_add_tail(&port->list,
432 		    &ioc->port_table_list);
433 		return port;
434 	}
435 	return NULL;
436 }
437 
438 /**
439  * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
440  * @ioc: per adapter object
441  * @port: hba_port object
442  * @phy: phy number
443  *
444  * Return virtual_phy object corresponding to phy number.
445  */
446 struct virtual_phy *
mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port,u32 phy)447 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
448 	struct hba_port *port, u32 phy)
449 {
450 	struct virtual_phy *vphy, *vphy_next;
451 
452 	if (!port->vphys_mask)
453 		return NULL;
454 
455 	list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
456 		if (vphy->phy_mask & (1 << phy))
457 			return vphy;
458 	}
459 	return NULL;
460 }
461 
462 /**
463  * _scsih_is_boot_device - search for matching boot device.
464  * @sas_address: sas address
465  * @device_name: device name specified in INDENTIFY fram
466  * @enclosure_logical_id: enclosure logical id
467  * @slot: slot number
468  * @form: specifies boot device form
469  * @boot_device: boot device object from bios page 2
470  *
471  * Return: 1 when there's a match, 0 means no match.
472  */
473 static int
_scsih_is_boot_device(u64 sas_address,u64 device_name,u64 enclosure_logical_id,u16 slot,u8 form,Mpi2BiosPage2BootDevice_t * boot_device)474 _scsih_is_boot_device(u64 sas_address, u64 device_name,
475 	u64 enclosure_logical_id, u16 slot, u8 form,
476 	Mpi2BiosPage2BootDevice_t *boot_device)
477 {
478 	int rc = 0;
479 
480 	switch (form) {
481 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
482 		if (!sas_address)
483 			break;
484 		rc = _scsih_srch_boot_sas_address(
485 		    sas_address, &boot_device->SasWwid);
486 		break;
487 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
488 		if (!enclosure_logical_id)
489 			break;
490 		rc = _scsih_srch_boot_encl_slot(
491 		    enclosure_logical_id,
492 		    slot, &boot_device->EnclosureSlot);
493 		break;
494 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
495 		if (!device_name)
496 			break;
497 		rc = _scsih_srch_boot_device_name(
498 		    device_name, &boot_device->DeviceName);
499 		break;
500 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
501 		break;
502 	}
503 
504 	return rc;
505 }
506 
/**
 * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: [out] sas address of the device; set to 0 on failure
 *
 * Reads SAS Device Page 0 for @handle and extracts the SAS address.
 *
 * Return: 0 success, -ENXIO when the config read fails or the page is
 * invalid (handle doesn't exist), -EIO on any other IOC status.
 */
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	*sas_address = 0;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/* For HBA, vSES doesn't return HBA SAS address. Instead return
		 * vSES's sas address.
		 */
		if ((handle <= ioc->sas_hba.num_phys) &&
		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
		   MPI2_SAS_DEVICE_INFO_SEP)))
			*sas_address = ioc->sas_hba.sas_address;
		else
			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* we hit this because the given parent handle doesn't exist */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* else error case */
	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
		handle, ioc_status, __FILE__, __LINE__, __func__);
	return -EIO;
}
555 
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device or pcie_device object
 * @channel: SAS or PCIe channel
 *
 * Determines whether this device should be the first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current.  This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	 /* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	 /* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* @device's concrete type is selected by @channel: RAID volume,
	 * PCIe (NVMe) device, or (default) SAS device.  RAID and PCIe
	 * devices carry a wwid instead of a sas address and have no
	 * device name / enclosure / slot information.
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* each of the three slots below latches only the FIRST matching
	 * device (the "if (!...device)" guard) and is never overwritten
	 */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}
651 
652 static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)653 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
654 		struct MPT3SAS_TARGET *tgt_priv)
655 {
656 	struct _sas_device *ret;
657 
658 	assert_spin_locked(&ioc->sas_device_lock);
659 
660 	ret = tgt_priv->sas_dev;
661 	if (ret)
662 		sas_device_get(ret);
663 
664 	return ret;
665 }
666 
667 static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)668 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
669 		struct MPT3SAS_TARGET *tgt_priv)
670 {
671 	struct _sas_device *ret;
672 	unsigned long flags;
673 
674 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
675 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
676 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
677 
678 	return ret;
679 }
680 
681 static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)682 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
683 	struct MPT3SAS_TARGET *tgt_priv)
684 {
685 	struct _pcie_device *ret;
686 
687 	assert_spin_locked(&ioc->pcie_device_lock);
688 
689 	ret = tgt_priv->pcie_dev;
690 	if (ret)
691 		pcie_device_get(ret);
692 
693 	return ret;
694 }
695 
696 /**
697  * mpt3sas_get_pdev_from_target - pcie device search
698  * @ioc: per adapter object
699  * @tgt_priv: starget private object
700  *
701  * Context: This function will acquire ioc->pcie_device_lock and will release
702  * before returning the pcie_device object.
703  *
704  * This searches for pcie_device from target, then return pcie_device object.
705  */
706 static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)707 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
708 	struct MPT3SAS_TARGET *tgt_priv)
709 {
710 	struct _pcie_device *ret;
711 	unsigned long flags;
712 
713 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
714 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
715 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
716 
717 	return ret;
718 }
719 
720 
721 /**
722  * __mpt3sas_get_sdev_by_rphy - sas device search
723  * @ioc: per adapter object
724  * @rphy: sas_rphy pointer
725  *
726  * Context: This function will acquire ioc->sas_device_lock and will release
727  * before returning the sas_device object.
728  *
729  * This searches for sas_device from rphy object
730  * then return sas_device object.
731  */
732 struct _sas_device *
__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER * ioc,struct sas_rphy * rphy)733 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
734 	struct sas_rphy *rphy)
735 {
736 	struct _sas_device *sas_device;
737 
738 	assert_spin_locked(&ioc->sas_device_lock);
739 
740 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
741 		if (sas_device->rphy != rphy)
742 			continue;
743 		sas_device_get(sas_device);
744 		return sas_device;
745 	}
746 
747 	sas_device = NULL;
748 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
749 		if (sas_device->rphy != rphy)
750 			continue;
751 		sas_device_get(sas_device);
752 		return sas_device;
753 	}
754 
755 	return NULL;
756 }
757 
758 /**
759  * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
760  *				sas address from sas_device_list list
761  * @ioc: per adapter object
762  * @sas_address: device sas address
763  * @port: port number
764  *
765  * Search for _sas_device object corresponding to provided sas address,
766  * if available return _sas_device object address otherwise return NULL.
767  */
768 struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)769 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
770 	u64 sas_address, struct hba_port *port)
771 {
772 	struct _sas_device *sas_device;
773 
774 	if (!port)
775 		return NULL;
776 
777 	assert_spin_locked(&ioc->sas_device_lock);
778 
779 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
780 		if (sas_device->sas_address != sas_address)
781 			continue;
782 		if (sas_device->port != port)
783 			continue;
784 		sas_device_get(sas_device);
785 		return sas_device;
786 	}
787 
788 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
789 		if (sas_device->sas_address != sas_address)
790 			continue;
791 		if (sas_device->port != port)
792 			continue;
793 		sas_device_get(sas_device);
794 		return sas_device;
795 	}
796 
797 	return NULL;
798 }
799 
800 /**
801  * mpt3sas_get_sdev_by_addr - sas device search
802  * @ioc: per adapter object
803  * @sas_address: sas address
804  * @port: hba port entry
805  * Context: Calling function should acquire ioc->sas_device_lock
806  *
807  * This searches for sas_device based on sas_address & port number,
808  * then return sas_device object.
809  */
810 struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)811 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
812 	u64 sas_address, struct hba_port *port)
813 {
814 	struct _sas_device *sas_device;
815 	unsigned long flags;
816 
817 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
818 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
819 	    sas_address, port);
820 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
821 
822 	return sas_device;
823 }
824 
825 static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)826 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
827 {
828 	struct _sas_device *sas_device;
829 
830 	assert_spin_locked(&ioc->sas_device_lock);
831 
832 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
833 		if (sas_device->handle == handle)
834 			goto found_device;
835 
836 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
837 		if (sas_device->handle == handle)
838 			goto found_device;
839 
840 	return NULL;
841 
842 found_device:
843 	sas_device_get(sas_device);
844 	return sas_device;
845 }
846 
/**
 * mpt3sas_get_sdev_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 *
 * Context: acquires and releases ioc->sas_device_lock internally; the
 * caller must not already hold it.
 *
 * This searches for sas_device based on @handle, then returns a
 * referenced sas_device object (or NULL when not found).
 */
struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}
868 
/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct
 * @starget: scsi target struct
 *
 * Logs the enclosure logical id/slot, enclosure level/connector name and
 * chassis slot of @sas_device (each only when present).  The log target
 * is chosen by which handle is supplied: @sdev first, else @starget,
 * else the adapter-level ioc log.
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}
926 
927 /**
928  * _scsih_sas_device_remove - remove sas_device from list.
929  * @ioc: per adapter object
930  * @sas_device: the sas_device object
931  * Context: This function will acquire ioc->sas_device_lock.
932  *
933  * If sas_device is on the list, remove it and decrement its reference count.
934  */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		/* unlink and drop the reference the list held on the device */
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
959 
960 /**
961  * _scsih_device_remove_by_handle - removing device object by handle
962  * @ioc: per adapter object
963  * @handle: device handle
964  */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* device removal is skipped while host reset is in progress */
	if (ioc->shost_recovery)
		return;

	/*
	 * The lookup returns a referenced object (cf. the pcie lookup
	 * helpers below); unlink and drop the list's own reference while
	 * the lock is still held.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		/* tear down outside the lock, then drop the lookup ref */
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
986 
987 /**
988  * mpt3sas_device_remove_by_sas_address - removing device object by
989  *					sas address & port number
990  * @ioc: per adapter object
991  * @sas_address: device sas_address
992  * @port: hba port entry
993  *
994  * Return nothing.
995  */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* device removal is skipped while host reset is in progress */
	if (ioc->shost_recovery)
		return;

	/*
	 * The lookup returns a referenced object (cf. the pcie lookup
	 * helpers below); unlink and drop the list's own reference while
	 * the lock is still held.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
	if (sas_device) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		/* tear down outside the lock, then drop the lookup ref */
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
1018 
1019 /**
1020  * _scsih_sas_device_add - insert sas_device to the list.
1021  * @ioc: per adapter object
1022  * @sas_device: the sas_device object
1023  * Context: This function will acquire ioc->sas_device_lock.
1024  *
1025  * Adding new object to the ioc->sas_device_list.
1026  */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* take a reference on behalf of the list before publishing it */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* hidden drives are tracked internally, never exposed to SML */
	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	/* expose the device through the SAS transport layer */
	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	     sas_device->sas_address_parent, sas_device->port)) {
		/* transport add failed: undo the list insertion above */
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When asyn scanning is enabled, its not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
1070 
1071 /**
1072  * _scsih_sas_device_init_add - insert sas_device to the list.
1073  * @ioc: per adapter object
1074  * @sas_device: the sas_device object
1075  * Context: This function will acquire ioc->sas_device_lock.
1076  *
1077  * Adding new object at driver load time to the ioc->sas_device_init_list.
1078  */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/*
	 * Take a reference on behalf of the init list, then evaluate the
	 * device as a boot device candidate while still under the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
1099 
1100 
1101 static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1102 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1103 {
1104 	struct _pcie_device *pcie_device;
1105 
1106 	assert_spin_locked(&ioc->pcie_device_lock);
1107 
1108 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1109 		if (pcie_device->wwid == wwid)
1110 			goto found_device;
1111 
1112 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1113 		if (pcie_device->wwid == wwid)
1114 			goto found_device;
1115 
1116 	return NULL;
1117 
1118 found_device:
1119 	pcie_device_get(pcie_device);
1120 	return pcie_device;
1121 }
1122 
1123 
1124 /**
1125  * mpt3sas_get_pdev_by_wwid - pcie device search
1126  * @ioc: per adapter object
1127  * @wwid: wwid
1128  *
1129  * Context: This function will acquire ioc->pcie_device_lock and will release
1130  * before returning the pcie_device object.
1131  *
1132  * This searches for pcie_device based on wwid, then return pcie_device object.
1133  */
1134 static struct _pcie_device *
mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1135 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1136 {
1137 	struct _pcie_device *pcie_device;
1138 	unsigned long flags;
1139 
1140 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1141 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1142 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1143 
1144 	return pcie_device;
1145 }
1146 
1147 
1148 static struct _pcie_device *
__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1149 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1150 	int channel)
1151 {
1152 	struct _pcie_device *pcie_device;
1153 
1154 	assert_spin_locked(&ioc->pcie_device_lock);
1155 
1156 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1157 		if (pcie_device->id == id && pcie_device->channel == channel)
1158 			goto found_device;
1159 
1160 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1161 		if (pcie_device->id == id && pcie_device->channel == channel)
1162 			goto found_device;
1163 
1164 	return NULL;
1165 
1166 found_device:
1167 	pcie_device_get(pcie_device);
1168 	return pcie_device;
1169 }
1170 
1171 static struct _pcie_device *
__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1172 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1173 {
1174 	struct _pcie_device *pcie_device;
1175 
1176 	assert_spin_locked(&ioc->pcie_device_lock);
1177 
1178 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1179 		if (pcie_device->handle == handle)
1180 			goto found_device;
1181 
1182 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1183 		if (pcie_device->handle == handle)
1184 			goto found_device;
1185 
1186 	return NULL;
1187 
1188 found_device:
1189 	pcie_device_get(pcie_device);
1190 	return pcie_device;
1191 }
1192 
1193 
1194 /**
1195  * mpt3sas_get_pdev_by_handle - pcie device search
1196  * @ioc: per adapter object
1197  * @handle: Firmware device handle
1198  *
1199  * Context: This function will acquire ioc->pcie_device_lock and will release
1200  * before returning the pcie_device object.
1201  *
1202  * This searches for pcie_device based on handle, then return pcie_device
1203  * object.
1204  */
1205 struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1206 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1207 {
1208 	struct _pcie_device *pcie_device;
1209 	unsigned long flags;
1210 
1211 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1212 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1213 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1214 
1215 	return pcie_device;
1216 }
1217 
1218 /**
1219  * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1220  * @ioc: per adapter object
1221  * Context: This function will acquire ioc->pcie_device_lock
1222  *
1223  * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1224  * which has reported maximum among all available NVMe drives.
1225  * Minimum max_shutdown_latency will be six seconds.
1226  */
1227 static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER * ioc)1228 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1229 {
1230 	struct _pcie_device *pcie_device;
1231 	unsigned long flags;
1232 	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1233 
1234 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1235 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1236 		if (pcie_device->shutdown_latency) {
1237 			if (shutdown_latency < pcie_device->shutdown_latency)
1238 				shutdown_latency =
1239 					pcie_device->shutdown_latency;
1240 		}
1241 	}
1242 	ioc->max_shutdown_latency = shutdown_latency;
1243 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1244 }
1245 
1246 /**
1247  * _scsih_pcie_device_remove - remove pcie_device from list.
1248  * @ioc: per adapter object
1249  * @pcie_device: the pcie_device object
1250  * Context: This function will acquire ioc->pcie_device_lock.
1251  *
1252  * If pcie_device is on the list, remove it and decrement its reference count.
1253  */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* unlink under the lock; an empty list node means already removed */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		/* free per-device data and drop the list's reference */
		kfree(pcie_device->serial_number);
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1296 
1297 
1298 /**
1299  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1300  * @ioc: per adapter object
1301  * @handle: device handle
1302  */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	/* device removal is skipped while host reset is in progress */
	if (ioc->shost_recovery)
		return;

	/* the lookup returns a referenced object (__mpt3sas_get_pdev_by_handle) */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* drop the reference the list held */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		/* tear down outside the lock, then drop the lookup ref */
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1339 
1340 /**
1341  * _scsih_pcie_device_add - add pcie_device object
1342  * @ioc: per adapter object
1343  * @pcie_device: pcie_device object
1344  *
1345  * This is added to the pcie_device_list link list.
1346  */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* take a reference on behalf of the list before publishing it */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	/* blocked devices are tracked but not exposed to the midlayer */
	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		/* midlayer add failed: undo the list insertion above */
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}
1389 
/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adding new object at driver load time to the ioc->pcie_device_init_list.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
				struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/*
	 * Take a reference on behalf of the init list; blocked devices
	 * are not considered as boot device candidates.
	 */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
1428 /**
1429  * _scsih_raid_device_find_by_id - raid device search
1430  * @ioc: per adapter object
1431  * @id: sas device target id
1432  * @channel: sas device channel
1433  * Context: Calling function should acquire ioc->raid_device_lock
1434  *
1435  * This searches for raid_device based on target id, then return raid_device
1436  * object.
1437  */
1438 static struct _raid_device *
_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1439 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1440 {
1441 	struct _raid_device *raid_device, *r;
1442 
1443 	r = NULL;
1444 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1445 		if (raid_device->id == id && raid_device->channel == channel) {
1446 			r = raid_device;
1447 			goto out;
1448 		}
1449 	}
1450 
1451  out:
1452 	return r;
1453 }
1454 
1455 /**
1456  * mpt3sas_raid_device_find_by_handle - raid device search
1457  * @ioc: per adapter object
1458  * @handle: sas device handle (assigned by firmware)
1459  * Context: Calling function should acquire ioc->raid_device_lock
1460  *
1461  * This searches for raid_device based on handle, then return raid_device
1462  * object.
1463  */
1464 struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1465 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1466 {
1467 	struct _raid_device *raid_device, *r;
1468 
1469 	r = NULL;
1470 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1471 		if (raid_device->handle != handle)
1472 			continue;
1473 		r = raid_device;
1474 		goto out;
1475 	}
1476 
1477  out:
1478 	return r;
1479 }
1480 
1481 /**
1482  * _scsih_raid_device_find_by_wwid - raid device search
1483  * @ioc: per adapter object
1484  * @wwid: ?
1485  * Context: Calling function should acquire ioc->raid_device_lock
1486  *
1487  * This searches for raid_device based on wwid, then return raid_device
1488  * object.
1489  */
1490 static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1491 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1492 {
1493 	struct _raid_device *raid_device, *r;
1494 
1495 	r = NULL;
1496 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1497 		if (raid_device->wwid != wwid)
1498 			continue;
1499 		r = raid_device;
1500 		goto out;
1501 	}
1502 
1503  out:
1504 	return r;
1505 }
1506 
1507 /**
1508  * _scsih_raid_device_add - add raid_device object
1509  * @ioc: per adapter object
1510  * @raid_device: raid_device object
1511  *
1512  * This is added to the raid_device_list link list.
1513  */
static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    raid_device->handle, (u64)raid_device->wwid));

	/* append under the raid device lock */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_add_tail(&raid_device->list, &ioc->raid_device_list);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
1529 
1530 /**
1531  * _scsih_raid_device_remove - delete raid_device object
1532  * @ioc: per adapter object
1533  * @raid_device: raid_device object
1534  *
1535  */
1536 static void
_scsih_raid_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1537 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1538 	struct _raid_device *raid_device)
1539 {
1540 	unsigned long flags;
1541 
1542 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1543 	list_del(&raid_device->list);
1544 	kfree(raid_device);
1545 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1546 }
1547 
1548 /**
1549  * mpt3sas_scsih_expander_find_by_handle - expander device search
1550  * @ioc: per adapter object
1551  * @handle: expander handle (assigned by firmware)
1552  * Context: Calling function should acquire ioc->sas_device_lock
1553  *
1554  * This searches for expander device based on handle, then returns the
1555  * sas_node object.
1556  */
1557 struct _sas_node *
mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1558 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1559 {
1560 	struct _sas_node *sas_expander, *r;
1561 
1562 	r = NULL;
1563 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1564 		if (sas_expander->handle != handle)
1565 			continue;
1566 		r = sas_expander;
1567 		goto out;
1568 	}
1569  out:
1570 	return r;
1571 }
1572 
1573 /**
1574  * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1575  * @ioc: per adapter object
1576  * @handle: enclosure handle (assigned by firmware)
1577  * Context: Calling function should acquire ioc->sas_device_lock
1578  *
1579  * This searches for enclosure device based on handle, then returns the
1580  * enclosure object.
1581  */
1582 static struct _enclosure_node *
mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1583 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1584 {
1585 	struct _enclosure_node *enclosure_dev, *r;
1586 
1587 	r = NULL;
1588 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1589 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1590 			continue;
1591 		r = enclosure_dev;
1592 		goto out;
1593 	}
1594 out:
1595 	return r;
1596 }
1597 /**
1598  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1599  * @ioc: per adapter object
1600  * @sas_address: sas address
1601  * @port: hba port entry
1602  * Context: Calling function should acquire ioc->sas_node_lock.
1603  *
1604  * This searches for expander device based on sas_address & port number,
1605  * then returns the sas_node object.
1606  */
1607 struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)1608 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1609 	u64 sas_address, struct hba_port *port)
1610 {
1611 	struct _sas_node *sas_expander, *r = NULL;
1612 
1613 	if (!port)
1614 		return r;
1615 
1616 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1617 		if (sas_expander->sas_address != sas_address)
1618 			continue;
1619 		if (sas_expander->port != port)
1620 			continue;
1621 		r = sas_expander;
1622 		goto out;
1623 	}
1624  out:
1625 	return r;
1626 }
1627 
1628 /**
1629  * _scsih_expander_node_add - insert expander device to the list.
1630  * @ioc: per adapter object
1631  * @sas_expander: the sas_device object
1632  * Context: This function will acquire ioc->sas_node_lock.
1633  *
1634  * Adding new object to the ioc->sas_expander_list.
1635  */
static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	unsigned long flags;

	/* append under the node lock so concurrent lookups stay safe */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}
1646 
1647 /**
1648  * _scsih_is_end_device - determines if device is an end device
1649  * @device_info: bitfield providing information about the device.
1650  * Context: none
1651  *
1652  * Return: 1 if end device.
1653  */
1654 static int
_scsih_is_end_device(u32 device_info)1655 _scsih_is_end_device(u32 device_info)
1656 {
1657 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1658 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1659 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1660 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1661 		return 1;
1662 	else
1663 		return 0;
1664 }
1665 
1666 /**
1667  * _scsih_is_nvme_pciescsi_device - determines if
1668  *			device is an pcie nvme/scsi device
1669  * @device_info: bitfield providing information about the device.
1670  * Context: none
1671  *
1672  * Returns 1 if device is pcie device type nvme/scsi.
1673  */
1674 static int
_scsih_is_nvme_pciescsi_device(u32 device_info)1675 _scsih_is_nvme_pciescsi_device(u32 device_info)
1676 {
1677 	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1678 	    == MPI26_PCIE_DEVINFO_NVME) ||
1679 	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1680 	    == MPI26_PCIE_DEVINFO_SCSI))
1681 		return 1;
1682 	else
1683 		return 0;
1684 }
1685 
1686 /**
1687  * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1688  * @ioc: per adapter object
1689  * @id: target id
1690  * @channel: channel
1691  * Context: This function will acquire ioc->scsi_lookup_lock.
1692  *
1693  * This will search for a matching channel:id in the scsi_lookup array,
1694  * returning 1 if found.
1695  */
1696 static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1697 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1698 	int channel)
1699 {
1700 	int smid;
1701 	struct scsi_cmnd *scmd;
1702 
1703 	for (smid = 1;
1704 	     smid <= ioc->shost->can_queue; smid++) {
1705 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1706 		if (!scmd)
1707 			continue;
1708 		if (scmd->device->id == id &&
1709 		    scmd->device->channel == channel)
1710 			return 1;
1711 	}
1712 	return 0;
1713 }
1714 
1715 /**
1716  * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1717  * @ioc: per adapter object
1718  * @id: target id
1719  * @lun: lun number
1720  * @channel: channel
1721  * Context: This function will acquire ioc->scsi_lookup_lock.
1722  *
1723  * This will search for a matching channel:id:lun in the scsi_lookup array,
1724  * returning 1 if found.
1725  */
1726 static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER * ioc,int id,unsigned int lun,int channel)1727 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1728 	unsigned int lun, int channel)
1729 {
1730 	int smid;
1731 	struct scsi_cmnd *scmd;
1732 
1733 	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1734 
1735 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1736 		if (!scmd)
1737 			continue;
1738 		if (scmd->device->id == id &&
1739 		    scmd->device->channel == channel &&
1740 		    scmd->device->lun == lun)
1741 			return 1;
1742 	}
1743 	return 0;
1744 }
1745 
1746 /**
1747  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1748  * @ioc: per adapter object
1749  * @smid: system request message index
1750  *
1751  * Return: the smid stored scmd pointer.
1752  * Then will dereference the stored scmd pointer.
1753  */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	u16 tag = smid - 1;	/* smids are 1-based; block layer tags are 0-based */

	/* only SCSI IO smids; the tail of the range is internal commands */
	if (smid > 0  &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/* rebuild the blk-mq unique tag: (hw queue << bits) | tag */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle filed must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			/* discard commands the driver is not actively tracking */
			st = scsi_cmd_priv(scmd);
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}
1787 
1788 /**
1789  * scsih_change_queue_depth - setting device queue depth
1790  * @sdev: scsi device struct
1791  * @qdepth: requested queue depth
1792  *
1793  * Return: queue depth.
1794  */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
	 * is disabled.
	 */
	if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
		goto not_sata;

	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	/* RAID volumes are not subject to the SATA depth cap */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	/* cap SATA end devices at MPT3SAS_SATA_QUEUE_DEPTH */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;

		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:

	/* untagged devices can have only one outstanding command */
	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
	    sdev->queue_depth, sdev->tagged_supported,
	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}
1847 
1848 /**
1849  * mpt3sas_scsih_change_queue_depth - setting device queue depth
1850  * @sdev: scsi device struct
1851  * @qdepth: requested queue depth
1852  *
1853  * Returns nothing.
1854  */
1855 void
mpt3sas_scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1856 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1857 {
1858 	struct Scsi_Host *shost = sdev->host;
1859 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1860 
1861 	if (ioc->enable_sdev_max_qd)
1862 		qdepth = shost->can_queue;
1863 
1864 	scsih_change_queue_depth(sdev, qdepth);
1865 }
1866 
/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Allocates the per-target private data (starget->hostdata) and binds
 * it to the matching internal device object, keyed by the channel the
 * target sits on: RAID volume, PCIe/NVMe device, or SAS/SATA end
 * device.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
				       GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	/* No firmware handle known yet; filled in from the device lists. */
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		/*
		 * Unlike the PCIe/SAS paths below, no reference is held on
		 * the raid_device; scsih_target_destroy() only clears the
		 * back-pointers under the same lock.
		 */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/*
		 * The lookup returns pcie_device with an elevated reference
		 * count; the matching put is in scsih_target_destroy()
		 * (it drops two: the lookup there plus this one).
		 */
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
			starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			/* NVMe devices have no hba SAS port object. */
			sas_target_priv_data->port = NULL;
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
				MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
					MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	/*
	 * Reference taken here (sas_target_priv_data->sas_dev) is dropped
	 * in scsih_target_destroy().
	 */
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		/* Hidden members of a RAID volume are not exposed to ULDs. */
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
					MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}
1959 
/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 *
 * Unbinds the target private data from the internal device object,
 * drops the device reference taken in scsih_target_alloc(), and frees
 * starget->hostdata.
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		/* Volumes hold no reference; just sever the back-pointers. */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
							sas_target_priv_data);
		if (pcie_device && (pcie_device->starget == starget) &&
			(pcie_device->id == starget->id) &&
			(pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Corresponding get() is in _scsih_target_alloc()
			 */
			sas_target_priv_data->pcie_dev = NULL;
			/* First put: the lookup just above. */
			pcie_device_put(pcie_device);
			/* Second put: the reference held since target_alloc. */
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Corresponding get() is in _scsih_target_alloc()
		 */
		sas_target_priv_data->sas_dev = NULL;
		/* First put: the lookup just above. */
		sas_device_put(sas_device);
		/* Second put: the reference held since target_alloc. */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}
2034 
/**
 * scsih_sdev_init - device add routine
 * @sdev: scsi device struct
 *
 * Allocates the per-LUN private data (sdev->hostdata), links it to the
 * target private data allocated in scsih_target_alloc(), and sets the
 * starget back-pointer on the internal device object if it was not yet
 * set.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_sdev_init(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
				       GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	/*
	 * NOTE(review): starget->hostdata is dereferenced without a NULL
	 * check — assumes scsih_target_alloc() succeeded for this target.
	 */
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* Hidden raid components must not be claimed by upper-level drivers. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
				sas_target_priv_data->sas_address);
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		/* Drop the lookup reference; no reference is kept here. */
		if (pcie_device)
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
		    sas_target_priv_data->sas_address,
		    sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			"%s : sas_device->starget set to starget @ %d\n",
			     __func__, __LINE__);
			sas_device->starget = starget;
		}

		/* Drop the lookup reference; no reference is kept here. */
		if (sas_device)
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}
2116 
/**
 * scsih_sdev_destroy - device destroy routine
 * @sdev: scsi device struct
 *
 * Decrements the target's LUN count, clears the internal device's
 * starget back-pointer once the last LUN is gone, and frees
 * sdev->hostdata.
 */
static void
scsih_sdev_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
				sas_target_priv_data);
		/* Only sever the back-pointer when the last LUN goes away. */
		if (pcie_device && !sas_target_priv_data->num_luns)
			pcie_device->starget = NULL;

		/* Drop the lookup reference taken just above. */
		if (pcie_device)
			pcie_device_put(pcie_device);

		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc,
				sas_target_priv_data);
		/* Only sever the back-pointer when the last LUN goes away. */
		if (sas_device && !sas_target_priv_data->num_luns)
			sas_device->starget = NULL;

		/* Drop the lookup reference taken just above. */
		if (sas_device)
			sas_device_put(sas_device);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}
2169 
2170 /**
2171  * _scsih_display_sata_capabilities - sata capabilities
2172  * @ioc: per adapter object
2173  * @handle: device handle
2174  * @sdev: scsi device struct
2175  */
2176 static void
_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER * ioc,u16 handle,struct scsi_device * sdev)2177 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2178 	u16 handle, struct scsi_device *sdev)
2179 {
2180 	Mpi2ConfigReply_t mpi_reply;
2181 	Mpi2SasDevicePage0_t sas_device_pg0;
2182 	u32 ioc_status;
2183 	u16 flags;
2184 	u32 device_info;
2185 
2186 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2187 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2188 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2189 			__FILE__, __LINE__, __func__);
2190 		return;
2191 	}
2192 
2193 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2194 	    MPI2_IOCSTATUS_MASK;
2195 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2196 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2197 			__FILE__, __LINE__, __func__);
2198 		return;
2199 	}
2200 
2201 	flags = le16_to_cpu(sas_device_pg0.Flags);
2202 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2203 
2204 	sdev_printk(KERN_INFO, sdev,
2205 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2206 	    "sw_preserve(%s)\n",
2207 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2208 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2209 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2210 	    "n",
2211 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2212 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2213 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2214 }
2215 
2216 /*
2217  * raid transport support -
2218  * Enabled for SLES11 and newer, in older kernels the driver will panic when
2219  * unloading the driver followed by a load - I believe that the subroutine
2220  * raid_class_release() is not cleaning up properly.
2221  */
2222 
2223 /**
2224  * scsih_is_raid - return boolean indicating device is raid volume
2225  * @dev: the device struct object
2226  */
2227 static int
scsih_is_raid(struct device * dev)2228 scsih_is_raid(struct device *dev)
2229 {
2230 	struct scsi_device *sdev = to_scsi_device(dev);
2231 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2232 
2233 	if (ioc->is_warpdrive)
2234 		return 0;
2235 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2236 }
2237 
2238 static int
scsih_is_nvme(struct device * dev)2239 scsih_is_nvme(struct device *dev)
2240 {
2241 	struct scsi_device *sdev = to_scsi_device(dev);
2242 
2243 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2244 }
2245 
2246 /**
2247  * scsih_get_resync - get raid volume resync percent complete
2248  * @dev: the device struct object
2249  */
2250 static void
scsih_get_resync(struct device * dev)2251 scsih_get_resync(struct device *dev)
2252 {
2253 	struct scsi_device *sdev = to_scsi_device(dev);
2254 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2255 	static struct _raid_device *raid_device;
2256 	unsigned long flags;
2257 	Mpi2RaidVolPage0_t vol_pg0;
2258 	Mpi2ConfigReply_t mpi_reply;
2259 	u32 volume_status_flags;
2260 	u8 percent_complete;
2261 	u16 handle;
2262 
2263 	percent_complete = 0;
2264 	handle = 0;
2265 	if (ioc->is_warpdrive)
2266 		goto out;
2267 
2268 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2269 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2270 	    sdev->channel);
2271 	if (raid_device) {
2272 		handle = raid_device->handle;
2273 		percent_complete = raid_device->percent_complete;
2274 	}
2275 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2276 
2277 	if (!handle)
2278 		goto out;
2279 
2280 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2281 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2282 	     sizeof(Mpi2RaidVolPage0_t))) {
2283 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2284 			__FILE__, __LINE__, __func__);
2285 		percent_complete = 0;
2286 		goto out;
2287 	}
2288 
2289 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2290 	if (!(volume_status_flags &
2291 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2292 		percent_complete = 0;
2293 
2294  out:
2295 
2296 	switch (ioc->hba_mpi_version_belonged) {
2297 	case MPI2_VERSION:
2298 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2299 		break;
2300 	case MPI25_VERSION:
2301 	case MPI26_VERSION:
2302 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2303 		break;
2304 	}
2305 }
2306 
2307 /**
2308  * scsih_get_state - get raid volume level
2309  * @dev: the device struct object
2310  */
2311 static void
scsih_get_state(struct device * dev)2312 scsih_get_state(struct device *dev)
2313 {
2314 	struct scsi_device *sdev = to_scsi_device(dev);
2315 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2316 	static struct _raid_device *raid_device;
2317 	unsigned long flags;
2318 	Mpi2RaidVolPage0_t vol_pg0;
2319 	Mpi2ConfigReply_t mpi_reply;
2320 	u32 volstate;
2321 	enum raid_state state = RAID_STATE_UNKNOWN;
2322 	u16 handle = 0;
2323 
2324 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2325 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2326 	    sdev->channel);
2327 	if (raid_device)
2328 		handle = raid_device->handle;
2329 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2330 
2331 	if (!raid_device)
2332 		goto out;
2333 
2334 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2335 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2336 	     sizeof(Mpi2RaidVolPage0_t))) {
2337 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2338 			__FILE__, __LINE__, __func__);
2339 		goto out;
2340 	}
2341 
2342 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2343 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2344 		state = RAID_STATE_RESYNCING;
2345 		goto out;
2346 	}
2347 
2348 	switch (vol_pg0.VolumeState) {
2349 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2350 	case MPI2_RAID_VOL_STATE_ONLINE:
2351 		state = RAID_STATE_ACTIVE;
2352 		break;
2353 	case  MPI2_RAID_VOL_STATE_DEGRADED:
2354 		state = RAID_STATE_DEGRADED;
2355 		break;
2356 	case MPI2_RAID_VOL_STATE_FAILED:
2357 	case MPI2_RAID_VOL_STATE_MISSING:
2358 		state = RAID_STATE_OFFLINE;
2359 		break;
2360 	}
2361  out:
2362 	switch (ioc->hba_mpi_version_belonged) {
2363 	case MPI2_VERSION:
2364 		raid_set_state(mpt2sas_raid_template, dev, state);
2365 		break;
2366 	case MPI25_VERSION:
2367 	case MPI26_VERSION:
2368 		raid_set_state(mpt3sas_raid_template, dev, state);
2369 		break;
2370 	}
2371 }
2372 
2373 /**
2374  * _scsih_set_level - set raid level
2375  * @ioc: ?
2376  * @sdev: scsi device struct
2377  * @volume_type: volume type
2378  */
2379 static void
_scsih_set_level(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev,u8 volume_type)2380 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2381 	struct scsi_device *sdev, u8 volume_type)
2382 {
2383 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2384 
2385 	switch (volume_type) {
2386 	case MPI2_RAID_VOL_TYPE_RAID0:
2387 		level = RAID_LEVEL_0;
2388 		break;
2389 	case MPI2_RAID_VOL_TYPE_RAID10:
2390 		level = RAID_LEVEL_10;
2391 		break;
2392 	case MPI2_RAID_VOL_TYPE_RAID1E:
2393 		level = RAID_LEVEL_1E;
2394 		break;
2395 	case MPI2_RAID_VOL_TYPE_RAID1:
2396 		level = RAID_LEVEL_1;
2397 		break;
2398 	}
2399 
2400 	switch (ioc->hba_mpi_version_belonged) {
2401 	case MPI2_VERSION:
2402 		raid_set_level(mpt2sas_raid_template,
2403 			&sdev->sdev_gendev, level);
2404 		break;
2405 	case MPI25_VERSION:
2406 	case MPI26_VERSION:
2407 		raid_set_level(mpt3sas_raid_template,
2408 			&sdev->sdev_gendev, level);
2409 		break;
2410 	}
2411 }
2412 
2413 
2414 /**
2415  * _scsih_get_volume_capabilities - volume capabilities
2416  * @ioc: per adapter object
2417  * @raid_device: the raid_device object
2418  *
2419  * Return: 0 for success, else 1
2420  */
2421 static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)2422 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2423 	struct _raid_device *raid_device)
2424 {
2425 	Mpi2RaidVolPage0_t *vol_pg0;
2426 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2427 	Mpi2SasDevicePage0_t sas_device_pg0;
2428 	Mpi2ConfigReply_t mpi_reply;
2429 	u16 sz;
2430 	u8 num_pds;
2431 
2432 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2433 	    &num_pds)) || !num_pds) {
2434 		dfailprintk(ioc,
2435 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2436 				     __FILE__, __LINE__, __func__));
2437 		return 1;
2438 	}
2439 
2440 	raid_device->num_pds = num_pds;
2441 	sz = struct_size(vol_pg0, PhysDisk, num_pds);
2442 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2443 	if (!vol_pg0) {
2444 		dfailprintk(ioc,
2445 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2446 				     __FILE__, __LINE__, __func__));
2447 		return 1;
2448 	}
2449 
2450 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2451 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2452 		dfailprintk(ioc,
2453 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2454 				     __FILE__, __LINE__, __func__));
2455 		kfree(vol_pg0);
2456 		return 1;
2457 	}
2458 
2459 	raid_device->volume_type = vol_pg0->VolumeType;
2460 
2461 	/* figure out what the underlying devices are by
2462 	 * obtaining the device_info bits for the 1st device
2463 	 */
2464 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2465 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2466 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2467 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2468 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2469 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2470 			raid_device->device_info =
2471 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2472 		}
2473 	}
2474 
2475 	kfree(vol_pg0);
2476 	return 0;
2477 }
2478 
2479 /**
2480  * _scsih_enable_tlr - setting TLR flags
2481  * @ioc: per adapter object
2482  * @sdev: scsi device struct
2483  *
2484  * Enabling Transaction Layer Retries for tape devices when
2485  * vpd page 0x90 is present
2486  *
2487  */
2488 static void
_scsih_enable_tlr(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev)2489 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2490 {
2491 
2492 	/* only for TAPE */
2493 	if (sdev->type != TYPE_TAPE)
2494 		return;
2495 
2496 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2497 		return;
2498 
2499 	sas_enable_tlr(sdev);
2500 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2501 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2502 	return;
2503 
2504 }
2505 
/**
 * scsih_sdev_configure - device configure routine.
 * @sdev: scsi device struct
 * @lim: queue limits
 *
 * Per device type (RAID volume, hidden raid component, NVMe, SAS/SATA)
 * this picks a queue depth, fixes up queue limits, and logs the device
 * identity.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	int qdepth;
	u8 ssp_target = 0;
	char *ds = "";
	char *r_level = "";
	u16 handle, volume_handle = 0;
	u64 volume_wwid = 0;

	qdepth = 1;
	sas_device_priv_data = sdev->hostdata;
	sas_device_priv_data->configured_lun = 1;
	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	handle = sas_target_priv_data->handle;

	/* raid volume handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (!raid_device) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		/*
		 * WARPDRIVE: Initialize the required data for Direct IO
		 */
		mpt3sas_init_warpdrive_properties(ioc, raid_device);

		/* RAID Queue Depth Support
		 * IS volume = underlying qdepth of drive type, either
		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
		 */
		if (raid_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
			ds = "SSP";
		} else {
			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
			if (raid_device->device_info &
			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				ds = "SATA";
			else
				ds = "STP";
		}

		switch (raid_device->volume_type) {
		case MPI2_RAID_VOL_TYPE_RAID0:
			r_level = "RAID0";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1E:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			/* OEM flag: display even-membered RAID1E as RAID10 */
			if (ioc->manu_pg10.OEMIdentifier &&
			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
			    MFG10_GF0_R10_DISPLAY) &&
			    !(raid_device->num_pds % 2))
				r_level = "RAID10";
			else
				r_level = "RAID1E";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID1";
			break;
		case MPI2_RAID_VOL_TYPE_RAID10:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID10";
			break;
		case MPI2_RAID_VOL_TYPE_UNKNOWN:
		default:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAIDX";
			break;
		}

		if (!ioc->hide_ir_msg)
			sdev_printk(KERN_INFO, sdev,
			   "%s: handle(0x%04x), wwid(0x%016llx),"
			    " pd_count(%d), type(%s)\n",
			    r_level, raid_device->handle,
			    (unsigned long long)raid_device->wwid,
			    raid_device->num_pds, ds);

		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
			lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS;
			sdev_printk(KERN_INFO, sdev,
					"Set queue's max_sector to: %u\n",
						MPT3SAS_RAID_MAX_SECTORS);
		}

		mpt3sas_scsih_change_queue_depth(sdev, qdepth);

		/* raid transport support */
		if (!ioc->is_warpdrive)
			_scsih_set_level(ioc, sdev, raid_device->volume_type);
		return 0;
	}

	/* non-raid handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
		/* hidden raid component: record the owning volume's
		 * handle/wwid for the sas_device fix-up below */
		if (mpt3sas_config_get_volume_handle(ioc, handle,
		    &volume_handle)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
		    volume_handle, &volume_wwid)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
	}

	/* PCIe handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
				sas_device_priv_data->sas_target->sas_address);
		if (!pcie_device) {
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		qdepth = ioc->max_nvme_qd;
		ds = "NVMe";
		sdev_printk(KERN_INFO, sdev,
			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			ds, handle, (unsigned long long)pcie_device->wwid,
			pcie_device->port_num);
		if (pcie_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
			ds,
			(unsigned long long)pcie_device->enclosure_logical_id,
			pcie_device->slot);
		if (pcie_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
				"%s: enclosure level(0x%04x),"
				"connector name( %s)\n", ds,
				pcie_device->enclosure_level,
				pcie_device->connector_name);

		/* Cap transfers to the drive's MDTS (bytes -> 512B sectors) */
		if (pcie_device->nvme_mdts)
			lim->max_hw_sectors = pcie_device->nvme_mdts / 512;

		pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
		/* Constrain SG segments to the IOC page size — presumably
		 * for NVMe PRP alignment; see ioc->page_size setup. */
		lim->virt_boundary_mask = ioc->page_size - 1;
		return 0;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	   sas_device_priv_data->sas_target->sas_address,
	   sas_device_priv_data->sas_target->port);
	if (!sas_device) {
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	sas_device->volume_handle = volume_handle;
	sas_device->volume_wwid = volume_wwid;
	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
		/* wide ports get the wideport depth, else narrowport */
		qdepth = (sas_device->port_type > 1) ?
			ioc->max_wideport_qd : ioc->max_narrowport_qd;
		ssp_target = 1;
		if (sas_device->device_info &
				MPI2_SAS_DEVICE_INFO_SEP) {
			sdev_printk(KERN_INFO, sdev,
			"set ignore_delay_remove for handle(0x%04x)\n",
			sas_device_priv_data->sas_target->handle);
			sas_device_priv_data->ignore_delay_remove = 1;
			ds = "SES";
		} else
			ds = "SSP";
	} else {
		qdepth = ioc->max_sata_qd;
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
			ds = "STP";
		else if (sas_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			ds = "SATA";
	}

	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
	    ds, handle, (unsigned long long)sas_device->sas_address,
	    sas_device->phy, (unsigned long long)sas_device->device_name);

	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);

	/* Drop the lookup reference taken above. */
	sas_device_put(sas_device);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (!ssp_target)
		_scsih_display_sata_capabilities(ioc, handle, sdev);


	mpt3sas_scsih_change_queue_depth(sdev, qdepth);

	if (ssp_target) {
		sas_read_port_mode_page(sdev);
		_scsih_enable_tlr(ioc, sdev);
	}

	return 0;
}
2753 
2754 /**
2755  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2756  * @sdev: scsi device struct
2757  * @bdev: pointer to block device context
2758  * @capacity: device size (in 512 byte sectors)
2759  * @params: three element array to place output:
2760  *              params[0] number of heads (max 255)
2761  *              params[1] number of sectors (max 63)
2762  *              params[2] number of cylinders
2763  */
2764 static int
scsih_bios_param(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int params[])2765 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2766 	sector_t capacity, int params[])
2767 {
2768 	int		heads;
2769 	int		sectors;
2770 	sector_t	cylinders;
2771 	ulong		dummy;
2772 
2773 	heads = 64;
2774 	sectors = 32;
2775 
2776 	dummy = heads * sectors;
2777 	cylinders = capacity;
2778 	sector_div(cylinders, dummy);
2779 
2780 	/*
2781 	 * Handle extended translation size for logical drives
2782 	 * > 1Gb
2783 	 */
2784 	if ((ulong)capacity >= 0x200000) {
2785 		heads = 255;
2786 		sectors = 63;
2787 		dummy = heads * sectors;
2788 		cylinders = capacity;
2789 		sector_div(cylinders, dummy);
2790 	}
2791 
2792 	/* return result */
2793 	params[0] = heads;
2794 	params[1] = sectors;
2795 	params[2] = cylinders;
2796 
2797 	return 0;
2798 }
2799 
2800 /**
2801  * _scsih_response_code - translation of device response code
2802  * @ioc: per adapter object
2803  * @response_code: response code returned by the device
2804  */
2805 static void
_scsih_response_code(struct MPT3SAS_ADAPTER * ioc,u8 response_code)2806 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2807 {
2808 	char *desc;
2809 
2810 	switch (response_code) {
2811 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2812 		desc = "task management request completed";
2813 		break;
2814 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2815 		desc = "invalid frame";
2816 		break;
2817 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2818 		desc = "task management request not supported";
2819 		break;
2820 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2821 		desc = "task management request failed";
2822 		break;
2823 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2824 		desc = "task management request succeeded";
2825 		break;
2826 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2827 		desc = "invalid lun";
2828 		break;
2829 	case 0xA:
2830 		desc = "overlapped tag attempted";
2831 		break;
2832 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2833 		desc = "task queued, however not sent to target";
2834 		break;
2835 	default:
2836 		desc = "unknown";
2837 		break;
2838 	}
2839 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2840 }
2841 
2842 /**
2843  * _scsih_tm_done - tm completion routine
2844  * @ioc: per adapter object
2845  * @smid: system request message index
2846  * @msix_index: MSIX table index supplied by the OS
2847  * @reply: reply message frame(lower 32bit addr)
2848  * Context: none.
2849  *
2850  * The callback handler when using scsih_issue_tm.
2851  *
2852  * Return: 1 meaning mf should be freed from _base_interrupt
2853  *         0 means the mf is freed from this function.
2854  */
2855 static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)2856 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2857 {
2858 	MPI2DefaultReply_t *mpi_reply;
2859 
2860 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2861 		return 1;
2862 	if (ioc->tm_cmds.smid != smid)
2863 		return 1;
2864 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2865 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2866 	if (mpi_reply) {
2867 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2868 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2869 	}
2870 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2871 	complete(&ioc->tm_cmds.done);
2872 	return 1;
2873 }
2874 
2875 /**
2876  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2877  * @ioc: per adapter object
2878  * @handle: device handle
2879  *
2880  * During taskmangement request, we need to freeze the device queue.
2881  */
2882 void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)2883 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2884 {
2885 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2886 	struct scsi_device *sdev;
2887 	u8 skip = 0;
2888 
2889 	shost_for_each_device(sdev, ioc->shost) {
2890 		if (skip)
2891 			continue;
2892 		sas_device_priv_data = sdev->hostdata;
2893 		if (!sas_device_priv_data)
2894 			continue;
2895 		if (sas_device_priv_data->sas_target->handle == handle) {
2896 			sas_device_priv_data->sas_target->tm_busy = 1;
2897 			skip = 1;
2898 			ioc->ignore_loginfos = 1;
2899 		}
2900 	}
2901 }
2902 
2903 /**
2904  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2905  * @ioc: per adapter object
2906  * @handle: device handle
2907  *
2908  * During taskmangement request, we need to freeze the device queue.
2909  */
2910 void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)2911 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2912 {
2913 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2914 	struct scsi_device *sdev;
2915 	u8 skip = 0;
2916 
2917 	shost_for_each_device(sdev, ioc->shost) {
2918 		if (skip)
2919 			continue;
2920 		sas_device_priv_data = sdev->hostdata;
2921 		if (!sas_device_priv_data)
2922 			continue;
2923 		if (sas_device_priv_data->sas_target->handle == handle) {
2924 			sas_device_priv_data->sas_target->tm_busy = 0;
2925 			skip = 1;
2926 			ioc->ignore_loginfos = 0;
2927 		}
2928 	}
2929 }
2930 
2931 /**
2932  * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2933  * @ioc: per adapter object
2934  * @channel: the channel assigned by the OS
2935  * @id: the id assigned by the OS
2936  * @lun: lun number
2937  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2938  * @smid_task: smid assigned to the task
2939  *
2940  * Look whether TM has aborted the timed out SCSI command, if
2941  * TM has aborted the IO then return SUCCESS else return FAILED.
2942  */
2943 static int
scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER * ioc,uint channel,uint id,uint lun,u8 type,u16 smid_task)2944 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2945 	uint id, uint lun, u8 type, u16 smid_task)
2946 {
2947 
2948 	if (smid_task <= ioc->shost->can_queue) {
2949 		switch (type) {
2950 		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2951 			if (!(_scsih_scsi_lookup_find_by_target(ioc,
2952 			    id, channel)))
2953 				return SUCCESS;
2954 			break;
2955 		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2956 		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2957 			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2958 			    lun, channel)))
2959 				return SUCCESS;
2960 			break;
2961 		default:
2962 			return SUCCESS;
2963 		}
2964 	} else if (smid_task == ioc->scsih_cmds.smid) {
2965 		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2966 		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2967 			return SUCCESS;
2968 	} else if (smid_task == ioc->ctl_cmds.smid) {
2969 		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2970 		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2971 			return SUCCESS;
2972 	}
2973 
2974 	return FAILED;
2975 }
2976 
2977 /**
2978  * scsih_tm_post_processing - post processing of target & LUN reset
2979  * @ioc: per adapter object
2980  * @handle: device handle
2981  * @channel: the channel assigned by the OS
2982  * @id: the id assigned by the OS
2983  * @lun: lun number
2984  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2985  * @smid_task: smid assigned to the task
2986  *
2987  * Post processing of target & LUN reset. Due to interrupt latency
2988  * issue it possible that interrupt for aborted IO might not be
2989  * received yet. So before returning failure status, poll the
2990  * reply descriptor pools for the reply of timed out SCSI command.
2991  * Return FAILED status if reply for timed out is not received
2992  * otherwise return SUCCESS.
2993  */
2994 static int
scsih_tm_post_processing(struct MPT3SAS_ADAPTER * ioc,u16 handle,uint channel,uint id,uint lun,u8 type,u16 smid_task)2995 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2996 	uint channel, uint id, uint lun, u8 type, u16 smid_task)
2997 {
2998 	int rc;
2999 
3000 	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3001 	if (rc == SUCCESS)
3002 		return rc;
3003 
3004 	ioc_info(ioc,
3005 	    "Poll ReplyDescriptor queues for completion of"
3006 	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3007 	    smid_task, type, handle);
3008 
3009 	/*
3010 	 * Due to interrupt latency issues, driver may receive interrupt for
3011 	 * TM first and then for aborted SCSI IO command. So, poll all the
3012 	 * ReplyDescriptor pools before returning the FAILED status to SML.
3013 	 */
3014 	mpt3sas_base_mask_interrupts(ioc);
3015 	mpt3sas_base_sync_reply_irqs(ioc, 1);
3016 	mpt3sas_base_unmask_interrupts(ioc);
3017 
3018 	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3019 }
3020 
/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 * Context: user, caller must hold ioc->tm_cmds.mutex (see
 *	mpt3sas_scsih_issue_locked_tm()).
 *
 * A generic API for sending task management requests to firmware.
 *
 * The callback index is set inside `ioc->tm_cb_idx`.
 * The caller is responsible to check for outstanding commands.
 *
 * Return: SUCCESS or FAILED.
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	lockdep_assert_held(&ioc->tm_cmds.mutex);

	/* only one TM command may be outstanding at a time */
	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/*
	 * Sanity-check the IOC state before issuing the TM; an active
	 * doorbell, fault, or coredump state means the controller cannot
	 * service the request, so escalate straight to a hard reset.
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests go through the high-priority request queue */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* tr_method only applies to task-level TM types */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	/* freeze the target's queue for the duration of the TM */
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		/* NOTE: mpt3sas_check_cmd_timeout is a macro that may set
		 * issue_reset when the timeout warrants a host reset.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
					FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	/* translate TM completion into SUCCESS/FAILED per TM type */
	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If DevHandle filed in smid_task's entry of request pool
		 * doesn't match with device handle on which this task abort
		 * TM is received then it means that TM has successfully
		 * aborted the timed out command. Since smid_task's entry in
		 * request pool will be memset to zero once the timed out
		 * command is returned to the SML. If the command is not
		 * aborted then smid_task's entry won't be cleared and it
		 * will have same DevHandle value on which this task abort TM
		 * is received and driver will return the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		/* may still poll reply queues before declaring failure */
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	/* always unfreeze the target queue and release the TM slot */
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
3188 
/**
 * mpt3sas_scsih_issue_locked_tm - send a tm request under tm_cmds.mutex
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 *
 * Convenience wrapper that serializes mpt3sas_scsih_issue_tm() callers
 * via ioc->tm_cmds.mutex.
 *
 * Return: SUCCESS or FAILED.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
		uint channel, uint id, u64 lun, u8 type, u16 smid_task,
		u16 msix_task, u8 timeout, u8 tr_method)
{
	int rc;

	mutex_lock(&ioc->tm_cmds.mutex);
	rc = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
	    smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return rc;
}
3202 
/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers.  Prints the command being
 * recovered plus identifying details (handle, wwid/sas address, enclosure
 * info) for the volume, NVMe, or SAS device it targets.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* WarpDrive adapters hide IR messages; adjust the label */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		starget_printk(KERN_INFO, starget,
			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
			device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* NVMe/PCIe device: lookup must stay inside the device lock */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
				pcie_device->handle,
				(unsigned long long)pcie_device->wwid,
				pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
					"enclosure logical id(0x%016llx), slot(%d)\n",
					(unsigned long long)
					pcie_device->enclosure_logical_id,
					pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
					"enclosure level(0x%04x), connector name( %s)\n",
					pcie_device->enclosure_level,
					pcie_device->connector_name);
			/* drop the reference taken by the lookup */
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		/* plain SAS device (possibly a hidden RAID component) */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				   (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			/* drop the reference taken by the lookup */
			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}
3284 
/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Issues an ABORT_TASK task management request for the timed-out command.
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	/* device already gone (or host being removed): complete the
	 * command with DID_NO_CONNECT and report success to the EH.
	 */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	/* NVMe devices without custom TM handling use a longer,
	 * firmware-supplied abort timeout.
	 */
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3358 
/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Issues a LOGICAL_UNIT_RESET task management request for the LUN the
 * command was sent to.  NOTE: shares most of its logic with
 * scsih_target_reset(); keep the two in sync.
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16	handle;
	u8	tr_method = 0;
	u8	tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* device already gone (or host being removed): complete the
	 * command with DID_NO_CONNECT and report success to the EH.
	 */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices without custom TM handling use the device's own
	 * reset timeout and the PCIe protocol-level reset method.
	 */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
		tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	/* drop references taken by the lookups above */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}
3439 
/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Issues a TARGET_RESET task management request for the target the
 * command was sent to (LUN 0 is used in the request).  NOTE: shares most
 * of its logic with scsih_dev_reset(); keep the two in sync.
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16	handle;
	u8	tr_method = 0;
	u8	tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* target already gone (or host being removed): complete the
	 * command with DID_NO_CONNECT and report success to the EH.
	 */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices without custom TM handling use the device's own
	 * reset timeout and the PCIe protocol-level reset method.
	 */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, 0,
		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	/* drop references taken by the lookups above */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3517 
3518 
3519 /**
3520  * scsih_host_reset - eh threads main host reset routine
3521  * @scmd: pointer to scsi command object
3522  *
3523  * Return: SUCCESS if command aborted else FAILED
3524  */
3525 static int
scsih_host_reset(struct scsi_cmnd * scmd)3526 scsih_host_reset(struct scsi_cmnd *scmd)
3527 {
3528 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3529 	int r, retval;
3530 
3531 	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3532 	scsi_print_command(scmd);
3533 
3534 	if (ioc->is_driver_loading || ioc->remove_host) {
3535 		ioc_info(ioc, "Blocking the host reset\n");
3536 		r = FAILED;
3537 		goto out;
3538 	}
3539 
3540 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3541 	r = (retval < 0) ? FAILED : SUCCESS;
3542 out:
3543 	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3544 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3545 
3546 	return r;
3547 }
3548 
/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.
 *
 * Two references are taken: one owned by membership on fw_event_list
 * (released by _scsih_fw_event_del_from_list()/dequeue_next_fw_event())
 * and one owned by the queued work item (released when the work runs).
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	/* event thread not up (e.g. during teardown): drop the event */
	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* reference held by the list */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	/* reference held by the queued work item */
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
3575 
3576 /**
3577  * _scsih_fw_event_del_from_list - delete fw_event from the list
3578  * @ioc: per adapter object
3579  * @fw_event: object describing the event
3580  * Context: This function will acquire ioc->fw_event_lock.
3581  *
3582  * If the fw_event is on the fw_event_list, remove it and do a put.
3583  */
3584 static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)3585 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3586 	*fw_event)
3587 {
3588 	unsigned long flags;
3589 
3590 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3591 	if (!list_empty(&fw_event->list)) {
3592 		list_del_init(&fw_event->list);
3593 		fw_event_work_put(fw_event);
3594 	}
3595 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3596 }
3597 
3598 
3599  /**
3600  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3601  * @ioc: per adapter object
3602  * @event_data: trigger event data
3603  */
3604 void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER * ioc,struct SL_WH_TRIGGERS_EVENT_DATA_T * event_data)3605 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3606 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3607 {
3608 	struct fw_event_work *fw_event;
3609 	u16 sz;
3610 
3611 	if (ioc->is_driver_loading)
3612 		return;
3613 	sz = sizeof(*event_data);
3614 	fw_event = alloc_fw_event_work(sz);
3615 	if (!fw_event)
3616 		return;
3617 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3618 	fw_event->ioc = ioc;
3619 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3620 	_scsih_fw_event_add(ioc, fw_event);
3621 	fw_event_work_put(fw_event);
3622 }
3623 
3624 /**
3625  * _scsih_error_recovery_delete_devices - remove devices not responding
3626  * @ioc: per adapter object
3627  */
3628 static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER * ioc)3629 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3630 {
3631 	struct fw_event_work *fw_event;
3632 
3633 	fw_event = alloc_fw_event_work(0);
3634 	if (!fw_event)
3635 		return;
3636 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3637 	fw_event->ioc = ioc;
3638 	_scsih_fw_event_add(ioc, fw_event);
3639 	fw_event_work_put(fw_event);
3640 }
3641 
3642 /**
3643  * mpt3sas_port_enable_complete - port enable completed (fake event)
3644  * @ioc: per adapter object
3645  */
3646 void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER * ioc)3647 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3648 {
3649 	struct fw_event_work *fw_event;
3650 
3651 	fw_event = alloc_fw_event_work(0);
3652 	if (!fw_event)
3653 		return;
3654 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3655 	fw_event->ioc = ioc;
3656 	_scsih_fw_event_add(ioc, fw_event);
3657 	fw_event_work_put(fw_event);
3658 }
3659 
/**
 * dequeue_next_fw_event - pop the oldest entry off the fw_event_list
 * @ioc: per adapter object
 *
 * Removes the first entry from ioc->fw_event_list under fw_event_lock
 * and drops the reference that the list held on it.
 *
 * Return: the dequeued fw_event_work, or NULL if the list was empty.
 *
 * NOTE(review): the list's reference is put before the pointer is
 * returned, so the returned event is only safe to touch if another
 * reference is still outstanding - confirm against callers.
 */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct fw_event_work *fw_event = NULL;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&ioc->fw_event_list)) {
		fw_event = list_first_entry(&ioc->fw_event_list,
				struct fw_event_work, list);
		list_del_init(&fw_event->list);
		/* drop the reference the list held on this event */
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	return fw_event;
}
3676 
3677 /**
3678  * _scsih_fw_event_cleanup_queue - cleanup event queue
3679  * @ioc: per adapter object
3680  *
3681  * Walk the firmware event queue, either killing timers, or waiting
3682  * for outstanding events to complete
3683  *
3684  * Context: task, can sleep
3685  */
3686 static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER * ioc)3687 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3688 {
3689 	struct fw_event_work *fw_event;
3690 
3691 	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3692 	    !ioc->firmware_event_thread)
3693 		return;
3694 	/*
3695 	 * Set current running event as ignore, so that
3696 	 * current running event will exit quickly.
3697 	 * As diag reset has occurred it is of no use
3698 	 * to process remaining stale event data entries.
3699 	 */
3700 	if (ioc->shost_recovery && ioc->current_event)
3701 		ioc->current_event->ignore = 1;
3702 
3703 	ioc->fw_events_cleanup = 1;
3704 	while ((fw_event = dequeue_next_fw_event(ioc)) ||
3705 	     (fw_event = ioc->current_event)) {
3706 
3707 		/*
3708 		 * Don't call cancel_work_sync() for current_event
3709 		 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3710 		 * otherwise we may observe deadlock if current
3711 		 * hard reset issued as part of processing the current_event.
3712 		 *
3713 		 * Orginal logic of cleaning the current_event is added
3714 		 * for handling the back to back host reset issued by the user.
3715 		 * i.e. during back to back host reset, driver use to process
3716 		 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
3717 		 * event back to back and this made the drives to unregister
3718 		 * the devices from SML.
3719 		 */
3720 
3721 		if (fw_event == ioc->current_event &&
3722 		    ioc->current_event->event !=
3723 		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
3724 			ioc->current_event = NULL;
3725 			continue;
3726 		}
3727 
3728 		/*
3729 		 * Driver has to clear ioc->start_scan flag when
3730 		 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
3731 		 * otherwise scsi_scan_host() API waits for the
3732 		 * 5 minute timer to expire. If we exit from
3733 		 * scsi_scan_host() early then we can issue the
3734 		 * new port enable request as part of current diag reset.
3735 		 */
3736 		if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
3737 			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
3738 			ioc->start_scan = 0;
3739 		}
3740 
3741 		/*
3742 		 * Wait on the fw_event to complete. If this returns 1, then
3743 		 * the event was never executed, and we need a put for the
3744 		 * reference the work had on the fw_event.
3745 		 *
3746 		 * If it did execute, we wait for it to finish, and the put will
3747 		 * happen from _firmware_event_work()
3748 		 */
3749 		if (cancel_work_sync(&fw_event->work))
3750 			fw_event_work_put(fw_event);
3751 
3752 	}
3753 	ioc->fw_events_cleanup = 0;
3754 }
3755 
3756 /**
3757  * _scsih_internal_device_block - block the sdev device
3758  * @sdev: per device object
3759  * @sas_device_priv_data : per device driver private data
3760  *
3761  * make sure device is blocked without error, if not
3762  * print an error
3763  */
3764 static void
_scsih_internal_device_block(struct scsi_device * sdev,struct MPT3SAS_DEVICE * sas_device_priv_data)3765 _scsih_internal_device_block(struct scsi_device *sdev,
3766 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3767 {
3768 	int r = 0;
3769 
3770 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3771 	    sas_device_priv_data->sas_target->handle);
3772 	sas_device_priv_data->block = 1;
3773 
3774 	r = scsi_internal_device_block_nowait(sdev);
3775 	if (r == -EINVAL)
3776 		sdev_printk(KERN_WARNING, sdev,
3777 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3778 		    r, sas_device_priv_data->sas_target->handle);
3779 }
3780 
3781 /**
3782  * _scsih_internal_device_unblock - unblock the sdev device
3783  * @sdev: per device object
3784  * @sas_device_priv_data : per device driver private data
3785  * make sure device is unblocked without error, if not retry
3786  * by blocking and then unblocking
3787  */
3788 
3789 static void
_scsih_internal_device_unblock(struct scsi_device * sdev,struct MPT3SAS_DEVICE * sas_device_priv_data)3790 _scsih_internal_device_unblock(struct scsi_device *sdev,
3791 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3792 {
3793 	int r = 0;
3794 
3795 	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3796 	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3797 	sas_device_priv_data->block = 0;
3798 	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3799 	if (r == -EINVAL) {
3800 		/* The device has been set to SDEV_RUNNING by SD layer during
3801 		 * device addition but the request queue is still stopped by
3802 		 * our earlier block call. We need to perform a block again
3803 		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3804 
3805 		sdev_printk(KERN_WARNING, sdev,
3806 		    "device_unblock failed with return(%d) for handle(0x%04x) "
3807 		    "performing a block followed by an unblock\n",
3808 		    r, sas_device_priv_data->sas_target->handle);
3809 		sas_device_priv_data->block = 1;
3810 		r = scsi_internal_device_block_nowait(sdev);
3811 		if (r)
3812 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
3813 			    "failed with return(%d) for handle(0x%04x)\n",
3814 			    r, sas_device_priv_data->sas_target->handle);
3815 
3816 		sas_device_priv_data->block = 0;
3817 		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3818 		if (r)
3819 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3820 			    " failed with return(%d) for handle(0x%04x)\n",
3821 			    r, sas_device_priv_data->sas_target->handle);
3822 	}
3823 }
3824 
3825 /**
3826  * _scsih_ublock_io_all_device - unblock every device
3827  * @ioc: per adapter object
3828  *
3829  * change the device state from block to running
3830  */
3831 static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER * ioc)3832 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3833 {
3834 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3835 	struct scsi_device *sdev;
3836 
3837 	shost_for_each_device(sdev, ioc->shost) {
3838 		sas_device_priv_data = sdev->hostdata;
3839 		if (!sas_device_priv_data)
3840 			continue;
3841 		if (!sas_device_priv_data->block)
3842 			continue;
3843 
3844 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3845 			"device_running, handle(0x%04x)\n",
3846 		    sas_device_priv_data->sas_target->handle));
3847 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3848 	}
3849 }
3850 
3851 
3852 /**
3853  * _scsih_ublock_io_device - prepare device to be deleted
3854  * @ioc: per adapter object
3855  * @sas_address: sas address
3856  * @port: hba port entry
3857  *
3858  * unblock then put device in offline state
3859  */
3860 static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)3861 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3862 	u64 sas_address, struct hba_port *port)
3863 {
3864 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3865 	struct scsi_device *sdev;
3866 
3867 	shost_for_each_device(sdev, ioc->shost) {
3868 		sas_device_priv_data = sdev->hostdata;
3869 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3870 			continue;
3871 		if (sas_device_priv_data->sas_target->sas_address
3872 		    != sas_address)
3873 			continue;
3874 		if (sas_device_priv_data->sas_target->port != port)
3875 			continue;
3876 		if (sas_device_priv_data->block)
3877 			_scsih_internal_device_unblock(sdev,
3878 				sas_device_priv_data);
3879 	}
3880 }
3881 
3882 /**
3883  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3884  * @ioc: per adapter object
3885  *
3886  * During device pull we need to appropriately set the sdev state.
3887  */
3888 static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER * ioc)3889 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3890 {
3891 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3892 	struct scsi_device *sdev;
3893 
3894 	shost_for_each_device(sdev, ioc->shost) {
3895 		sas_device_priv_data = sdev->hostdata;
3896 		if (!sas_device_priv_data)
3897 			continue;
3898 		if (sas_device_priv_data->block)
3899 			continue;
3900 		if (sas_device_priv_data->ignore_delay_remove) {
3901 			sdev_printk(KERN_INFO, sdev,
3902 			"%s skip device_block for SES handle(0x%04x)\n",
3903 			__func__, sas_device_priv_data->sas_target->handle);
3904 			continue;
3905 		}
3906 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3907 	}
3908 }
3909 
3910 /**
3911  * _scsih_block_io_device - set the device state to SDEV_BLOCK
3912  * @ioc: per adapter object
3913  * @handle: device handle
3914  *
3915  * During device pull we need to appropriately set the sdev state.
3916  */
3917 static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)3918 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3919 {
3920 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3921 	struct scsi_device *sdev;
3922 	struct _sas_device *sas_device;
3923 
3924 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3925 
3926 	shost_for_each_device(sdev, ioc->shost) {
3927 		sas_device_priv_data = sdev->hostdata;
3928 		if (!sas_device_priv_data)
3929 			continue;
3930 		if (sas_device_priv_data->sas_target->handle != handle)
3931 			continue;
3932 		if (sas_device_priv_data->block)
3933 			continue;
3934 		if (sas_device && sas_device->pend_sas_rphy_add)
3935 			continue;
3936 		if (sas_device_priv_data->ignore_delay_remove) {
3937 			sdev_printk(KERN_INFO, sdev,
3938 			"%s skip device_block for SES handle(0x%04x)\n",
3939 			__func__, sas_device_priv_data->sas_target->handle);
3940 			continue;
3941 		}
3942 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3943 	}
3944 
3945 	if (sas_device)
3946 		sas_device_put(sas_device);
3947 }
3948 
3949 /**
3950  * _scsih_block_io_to_children_attached_to_ex
3951  * @ioc: per adapter object
3952  * @sas_expander: the sas_device object
3953  *
3954  * This routine set sdev state to SDEV_BLOCK for all devices
3955  * attached to this expander. This function called when expander is
3956  * pulled.
3957  */
3958 static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)3959 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3960 	struct _sas_node *sas_expander)
3961 {
3962 	struct _sas_port *mpt3sas_port;
3963 	struct _sas_device *sas_device;
3964 	struct _sas_node *expander_sibling;
3965 	unsigned long flags;
3966 
3967 	if (!sas_expander)
3968 		return;
3969 
3970 	list_for_each_entry(mpt3sas_port,
3971 	   &sas_expander->sas_port_list, port_list) {
3972 		if (mpt3sas_port->remote_identify.device_type ==
3973 		    SAS_END_DEVICE) {
3974 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
3975 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3976 			    mpt3sas_port->remote_identify.sas_address,
3977 			    mpt3sas_port->hba_port);
3978 			if (sas_device) {
3979 				set_bit(sas_device->handle,
3980 						ioc->blocking_handles);
3981 				sas_device_put(sas_device);
3982 			}
3983 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3984 		}
3985 	}
3986 
3987 	list_for_each_entry(mpt3sas_port,
3988 	   &sas_expander->sas_port_list, port_list) {
3989 
3990 		if (mpt3sas_port->remote_identify.device_type ==
3991 		    SAS_EDGE_EXPANDER_DEVICE ||
3992 		    mpt3sas_port->remote_identify.device_type ==
3993 		    SAS_FANOUT_EXPANDER_DEVICE) {
3994 			expander_sibling =
3995 			    mpt3sas_scsih_expander_find_by_sas_address(
3996 			    ioc, mpt3sas_port->remote_identify.sas_address,
3997 			    mpt3sas_port->hba_port);
3998 			_scsih_block_io_to_children_attached_to_ex(ioc,
3999 			    expander_sibling);
4000 		}
4001 	}
4002 }
4003 
4004 /**
4005  * _scsih_block_io_to_children_attached_directly
4006  * @ioc: per adapter object
4007  * @event_data: topology change event data
4008  *
4009  * This routine set sdev state to SDEV_BLOCK for all devices
4010  * direct attached during device pull.
4011  */
4012 static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)4013 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4014 	Mpi2EventDataSasTopologyChangeList_t *event_data)
4015 {
4016 	int i;
4017 	u16 handle;
4018 	u16 reason_code;
4019 
4020 	for (i = 0; i < event_data->NumEntries; i++) {
4021 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4022 		if (!handle)
4023 			continue;
4024 		reason_code = event_data->PHY[i].PhyStatus &
4025 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4026 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4027 			_scsih_block_io_device(ioc, handle);
4028 	}
4029 }
4030 
4031 /**
4032  * _scsih_block_io_to_pcie_children_attached_directly
4033  * @ioc: per adapter object
4034  * @event_data: topology change event data
4035  *
4036  * This routine set sdev state to SDEV_BLOCK for all devices
4037  * direct attached during device pull/reconnect.
4038  */
4039 static void
_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)4040 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4041 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4042 {
4043 	int i;
4044 	u16 handle;
4045 	u16 reason_code;
4046 
4047 	for (i = 0; i < event_data->NumEntries; i++) {
4048 		handle =
4049 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4050 		if (!handle)
4051 			continue;
4052 		reason_code = event_data->PortEntry[i].PortStatus;
4053 		if (reason_code ==
4054 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4055 			_scsih_block_io_device(ioc, handle);
4056 	}
4057 }
4058 /**
4059  * _scsih_tm_tr_send - send task management request
4060  * @ioc: per adapter object
4061  * @handle: device handle
4062  * Context: interrupt time.
4063  *
4064  * This code is to initiate the device removal handshake protocol
4065  * with controller firmware.  This function will issue target reset
4066  * using high priority request queue.  It will send a sas iounit
4067  * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
4068  *
4069  * This is designed to send muliple task management request at the same
4070  * time to the fifo. If the fifo is full, we will append the request,
4071  * and process it in a future completion.
4072  */
4073 static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER * ioc,u16 handle)4074 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4075 {
4076 	Mpi2SCSITaskManagementRequest_t *mpi_request;
4077 	u16 smid;
4078 	struct _sas_device *sas_device = NULL;
4079 	struct _pcie_device *pcie_device = NULL;
4080 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4081 	u64 sas_address = 0;
4082 	unsigned long flags;
4083 	struct _tr_list *delayed_tr;
4084 	u32 ioc_state;
4085 	u8 tr_method = 0;
4086 	struct hba_port *port = NULL;
4087 
4088 	if (ioc->pci_error_recovery) {
4089 		dewtprintk(ioc,
4090 			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4091 				    __func__, handle));
4092 		return;
4093 	}
4094 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4095 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4096 		dewtprintk(ioc,
4097 			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4098 				    __func__, handle));
4099 		return;
4100 	}
4101 
4102 	/* if PD, then return */
4103 	if (test_bit(handle, ioc->pd_handles))
4104 		return;
4105 
4106 	clear_bit(handle, ioc->pend_os_device_add);
4107 
4108 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
4109 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4110 	if (sas_device && sas_device->starget &&
4111 	    sas_device->starget->hostdata) {
4112 		sas_target_priv_data = sas_device->starget->hostdata;
4113 		sas_target_priv_data->deleted = 1;
4114 		sas_address = sas_device->sas_address;
4115 		port = sas_device->port;
4116 	}
4117 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4118 	if (!sas_device) {
4119 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4120 		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4121 		if (pcie_device && pcie_device->starget &&
4122 			pcie_device->starget->hostdata) {
4123 			sas_target_priv_data = pcie_device->starget->hostdata;
4124 			sas_target_priv_data->deleted = 1;
4125 			sas_address = pcie_device->wwid;
4126 		}
4127 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
4128 		if (pcie_device && (!ioc->tm_custom_handling) &&
4129 		    (!(mpt3sas_scsih_is_pcie_scsi_device(
4130 		    pcie_device->device_info))))
4131 			tr_method =
4132 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4133 		else
4134 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4135 	}
4136 	if (sas_target_priv_data) {
4137 		dewtprintk(ioc,
4138 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4139 				    handle, (u64)sas_address));
4140 		if (sas_device) {
4141 			if (sas_device->enclosure_handle != 0)
4142 				dewtprintk(ioc,
4143 					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4144 						    (u64)sas_device->enclosure_logical_id,
4145 						    sas_device->slot));
4146 			if (sas_device->connector_name[0] != '\0')
4147 				dewtprintk(ioc,
4148 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4149 						    sas_device->enclosure_level,
4150 						    sas_device->connector_name));
4151 		} else if (pcie_device) {
4152 			if (pcie_device->enclosure_handle != 0)
4153 				dewtprintk(ioc,
4154 					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4155 						    (u64)pcie_device->enclosure_logical_id,
4156 						    pcie_device->slot));
4157 			if (pcie_device->connector_name[0] != '\0')
4158 				dewtprintk(ioc,
4159 					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4160 						    pcie_device->enclosure_level,
4161 						    pcie_device->connector_name));
4162 		}
4163 		_scsih_ublock_io_device(ioc, sas_address, port);
4164 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4165 	}
4166 
4167 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4168 	if (!smid) {
4169 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4170 		if (!delayed_tr)
4171 			goto out;
4172 		INIT_LIST_HEAD(&delayed_tr->list);
4173 		delayed_tr->handle = handle;
4174 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4175 		dewtprintk(ioc,
4176 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4177 				    handle));
4178 		goto out;
4179 	}
4180 
4181 	dewtprintk(ioc,
4182 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4183 			    handle, smid, ioc->tm_tr_cb_idx));
4184 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4185 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4186 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4187 	mpi_request->DevHandle = cpu_to_le16(handle);
4188 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4189 	mpi_request->MsgFlags = tr_method;
4190 	set_bit(handle, ioc->device_remove_in_progress);
4191 	ioc->put_smid_hi_priority(ioc, smid, 0);
4192 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
4193 
4194 out:
4195 	if (sas_device)
4196 		sas_device_put(sas_device);
4197 	if (pcie_device)
4198 		pcie_device_put(pcie_device);
4199 }
4200 
4201 /**
4202  * _scsih_tm_tr_complete -
4203  * @ioc: per adapter object
4204  * @smid: system request message index
4205  * @msix_index: MSIX table index supplied by the OS
4206  * @reply: reply message frame(lower 32bit addr)
4207  * Context: interrupt time.
4208  *
4209  * This is the target reset completion routine.
4210  * This code is part of the code to initiate the device removal
4211  * handshake protocol with controller firmware.
4212  * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4213  *
4214  * Return: 1 meaning mf should be freed from _base_interrupt
4215  *         0 means the mf is freed from this function.
4216  */
4217 static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)4218 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4219 	u32 reply)
4220 {
4221 	u16 handle;
4222 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4223 	Mpi2SCSITaskManagementReply_t *mpi_reply =
4224 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4225 	Mpi2SasIoUnitControlRequest_t *mpi_request;
4226 	u16 smid_sas_ctrl;
4227 	u32 ioc_state;
4228 	struct _sc_list *delayed_sc;
4229 
4230 	if (ioc->pci_error_recovery) {
4231 		dewtprintk(ioc,
4232 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4233 				    __func__));
4234 		return 1;
4235 	}
4236 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4237 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4238 		dewtprintk(ioc,
4239 			   ioc_info(ioc, "%s: host is not operational\n",
4240 				    __func__));
4241 		return 1;
4242 	}
4243 	if (unlikely(!mpi_reply)) {
4244 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4245 			__FILE__, __LINE__, __func__);
4246 		return 1;
4247 	}
4248 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4249 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
4250 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4251 		dewtprintk(ioc,
4252 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4253 				   handle,
4254 				   le16_to_cpu(mpi_reply->DevHandle), smid));
4255 		return 0;
4256 	}
4257 
4258 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4259 	dewtprintk(ioc,
4260 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4261 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4262 			    le32_to_cpu(mpi_reply->IOCLogInfo),
4263 			    le32_to_cpu(mpi_reply->TerminationCount)));
4264 
4265 	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4266 	if (!smid_sas_ctrl) {
4267 		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4268 		if (!delayed_sc)
4269 			return _scsih_check_for_pending_tm(ioc, smid);
4270 		INIT_LIST_HEAD(&delayed_sc->list);
4271 		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4272 		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4273 		dewtprintk(ioc,
4274 			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4275 				    handle));
4276 		return _scsih_check_for_pending_tm(ioc, smid);
4277 	}
4278 
4279 	dewtprintk(ioc,
4280 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4281 			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4282 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4283 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4284 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4285 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4286 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
4287 	ioc->put_smid_default(ioc, smid_sas_ctrl);
4288 
4289 	return _scsih_check_for_pending_tm(ioc, smid);
4290 }
4291 
4292 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4293  *				 issue to IOC or not.
4294  * @ioc: per adapter object
4295  * @scmd: pointer to scsi command object
4296  *
4297  * Returns true if scmd can be issued to IOC otherwise returns false.
4298  */
_scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd)4299 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4300 	struct scsi_cmnd *scmd)
4301 {
4302 
4303 	if (ioc->pci_error_recovery)
4304 		return false;
4305 
4306 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4307 		if (ioc->remove_host)
4308 			return false;
4309 
4310 		return true;
4311 	}
4312 
4313 	if (ioc->remove_host) {
4314 
4315 		switch (scmd->cmnd[0]) {
4316 		case SYNCHRONIZE_CACHE:
4317 		case START_STOP:
4318 			return true;
4319 		default:
4320 			return false;
4321 		}
4322 	}
4323 
4324 	return true;
4325 }
4326 
4327 /**
4328  * _scsih_sas_control_complete - completion routine
4329  * @ioc: per adapter object
4330  * @smid: system request message index
4331  * @msix_index: MSIX table index supplied by the OS
4332  * @reply: reply message frame(lower 32bit addr)
4333  * Context: interrupt time.
4334  *
4335  * This is the sas iounit control completion routine.
4336  * This code is part of the code to initiate the device removal
4337  * handshake protocol with controller firmware.
4338  *
4339  * Return: 1 meaning mf should be freed from _base_interrupt
4340  *         0 means the mf is freed from this function.
4341  */
4342 static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)4343 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4344 	u8 msix_index, u32 reply)
4345 {
4346 	Mpi2SasIoUnitControlReply_t *mpi_reply =
4347 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4348 
4349 	if (likely(mpi_reply)) {
4350 		dewtprintk(ioc,
4351 			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4352 				    le16_to_cpu(mpi_reply->DevHandle), smid,
4353 				    le16_to_cpu(mpi_reply->IOCStatus),
4354 				    le32_to_cpu(mpi_reply->IOCLogInfo)));
4355 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
4356 		     MPI2_IOCSTATUS_SUCCESS) {
4357 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4358 			    ioc->device_remove_in_progress);
4359 		}
4360 	} else {
4361 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4362 			__FILE__, __LINE__, __func__);
4363 	}
4364 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4365 }
4366 
4367 /**
4368  * _scsih_tm_tr_volume_send - send target reset request for volumes
4369  * @ioc: per adapter object
4370  * @handle: device handle
4371  * Context: interrupt time.
4372  *
4373  * This is designed to send muliple task management request at the same
4374  * time to the fifo. If the fifo is full, we will append the request,
4375  * and process it in a future completion.
4376  */
4377 static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER * ioc,u16 handle)4378 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4379 {
4380 	Mpi2SCSITaskManagementRequest_t *mpi_request;
4381 	u16 smid;
4382 	struct _tr_list *delayed_tr;
4383 
4384 	if (ioc->pci_error_recovery) {
4385 		dewtprintk(ioc,
4386 			   ioc_info(ioc, "%s: host reset in progress!\n",
4387 				    __func__));
4388 		return;
4389 	}
4390 
4391 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4392 	if (!smid) {
4393 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4394 		if (!delayed_tr)
4395 			return;
4396 		INIT_LIST_HEAD(&delayed_tr->list);
4397 		delayed_tr->handle = handle;
4398 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4399 		dewtprintk(ioc,
4400 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4401 				    handle));
4402 		return;
4403 	}
4404 
4405 	dewtprintk(ioc,
4406 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4407 			    handle, smid, ioc->tm_tr_volume_cb_idx));
4408 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4409 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4410 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4411 	mpi_request->DevHandle = cpu_to_le16(handle);
4412 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4413 	ioc->put_smid_hi_priority(ioc, smid, 0);
4414 }
4415 
4416 /**
4417  * _scsih_tm_volume_tr_complete - target reset completion
4418  * @ioc: per adapter object
4419  * @smid: system request message index
4420  * @msix_index: MSIX table index supplied by the OS
4421  * @reply: reply message frame(lower 32bit addr)
4422  * Context: interrupt time.
4423  *
4424  * Return: 1 meaning mf should be freed from _base_interrupt
4425  *         0 means the mf is freed from this function.
4426  */
4427 static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)4428 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4429 	u8 msix_index, u32 reply)
4430 {
4431 	u16 handle;
4432 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4433 	Mpi2SCSITaskManagementReply_t *mpi_reply =
4434 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4435 
4436 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
4437 		dewtprintk(ioc,
4438 			   ioc_info(ioc, "%s: host reset in progress!\n",
4439 				    __func__));
4440 		return 1;
4441 	}
4442 	if (unlikely(!mpi_reply)) {
4443 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4444 			__FILE__, __LINE__, __func__);
4445 		return 1;
4446 	}
4447 
4448 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4449 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
4450 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4451 		dewtprintk(ioc,
4452 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4453 				   handle, le16_to_cpu(mpi_reply->DevHandle),
4454 				   smid));
4455 		return 0;
4456 	}
4457 
4458 	dewtprintk(ioc,
4459 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4460 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4461 			    le32_to_cpu(mpi_reply->IOCLogInfo),
4462 			    le32_to_cpu(mpi_reply->TerminationCount)));
4463 
4464 	return _scsih_check_for_pending_tm(ioc, smid);
4465 }
4466 
4467 /**
4468  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4469  * @ioc: per adapter object
4470  * @smid: system request message index
4471  * @event: Event ID
4472  * @event_context: used to track events uniquely
4473  *
4474  * Context - processed in interrupt context.
4475  */
4476 static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER * ioc,u16 smid,U16 event,U32 event_context)4477 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4478 				U32 event_context)
4479 {
4480 	Mpi2EventAckRequest_t *ack_request;
4481 	int i = smid - ioc->internal_smid;
4482 	unsigned long flags;
4483 
4484 	/* Without releasing the smid just update the
4485 	 * call back index and reuse the same smid for
4486 	 * processing this delayed request
4487 	 */
4488 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4489 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4490 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4491 
4492 	dewtprintk(ioc,
4493 		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4494 			    le16_to_cpu(event), smid, ioc->base_cb_idx));
4495 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4496 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4497 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4498 	ack_request->Event = event;
4499 	ack_request->EventContext = event_context;
4500 	ack_request->VF_ID = 0;  /* TODO */
4501 	ack_request->VP_ID = 0;
4502 	ioc->put_smid_default(ioc, smid);
4503 }
4504 
/**
 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
 *				sas_io_unit_ctrl messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
					u16 smid, u16 handle)
{
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u32 ioc_state;
	int i = smid - ioc->internal_smid;	/* index into internal_lookup[] */
	unsigned long flags;

	/* Drop the delayed request silently when the host is being removed,
	 * is in PCI error recovery, or the IOC is not operational.
	 */
	if (ioc->remove_host) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host has been removed\n",
				    __func__));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return;
	}

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_sas_control_cb_idx));
	/* Ask the firmware to remove the device behind @handle. */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = cpu_to_le16(handle);
	ioc->put_smid_default(ioc, smid);
}
4560 
4561 /**
4562  * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4563  * @ioc: per adapter object
4564  * @smid: system request message index
4565  *
4566  * Context: Executed in interrupt context
4567  *
4568  * This will check delayed internal messages list, and process the
4569  * next request.
4570  *
4571  * Return: 1 meaning mf should be freed from _base_interrupt
4572  *         0 means the mf is freed from this function.
4573  */
4574 u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER * ioc,u16 smid)4575 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4576 {
4577 	struct _sc_list *delayed_sc;
4578 	struct _event_ack_list *delayed_event_ack;
4579 
4580 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4581 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4582 						struct _event_ack_list, list);
4583 		_scsih_issue_delayed_event_ack(ioc, smid,
4584 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4585 		list_del(&delayed_event_ack->list);
4586 		kfree(delayed_event_ack);
4587 		return 0;
4588 	}
4589 
4590 	if (!list_empty(&ioc->delayed_sc_list)) {
4591 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4592 						struct _sc_list, list);
4593 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4594 						 delayed_sc->handle);
4595 		list_del(&delayed_sc->list);
4596 		kfree(delayed_sc);
4597 		return 0;
4598 	}
4599 	return 1;
4600 }
4601 
4602 /**
4603  * _scsih_check_for_pending_tm - check for pending task management
4604  * @ioc: per adapter object
4605  * @smid: system request message index
4606  *
4607  * This will check delayed target reset list, and feed the
4608  * next reqeust.
4609  *
4610  * Return: 1 meaning mf should be freed from _base_interrupt
4611  *         0 means the mf is freed from this function.
4612  */
4613 static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER * ioc,u16 smid)4614 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4615 {
4616 	struct _tr_list *delayed_tr;
4617 
4618 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4619 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4620 		    struct _tr_list, list);
4621 		mpt3sas_base_free_smid(ioc, smid);
4622 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4623 		list_del(&delayed_tr->list);
4624 		kfree(delayed_tr);
4625 		return 0;
4626 	}
4627 
4628 	if (!list_empty(&ioc->delayed_tr_list)) {
4629 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4630 		    struct _tr_list, list);
4631 		mpt3sas_base_free_smid(ioc, smid);
4632 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4633 		list_del(&delayed_tr->list);
4634 		kfree(delayed_tr);
4635 		return 0;
4636 	}
4637 
4638 	return 1;
4639 }
4640 
/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot.  When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* Send target resets for every attached device reported as gone. */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* Handles below num_phys belong to the HBA itself (direct
	 * attach), not to an expander.
	 */
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* Drain the blocking_handles bitmap populated above, one
		 * handle at a time; the test_and_clear_bit() doubles as the
		 * loop terminator when find_first_bit() runs past the end.
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
				   fw_event->event_data;
		/* Void queued add/responding events for the expander that
		 * just disappeared - cable-breaker scenario.
		 */
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4721 
/**
 * _scsih_check_pcie_topo_remove_events - sanity check on topo
 * events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This handles the case where driver receives multiple switch
 * or device add and delete events in a single shot.  When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
 */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
	unsigned long flags;
	int i, reason_code;
	u16 handle, switch_handle;

	/* Send target resets for every attached device reported as gone. */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* A zero switch handle means directly attached devices. */
	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
	if (!switch_handle) {
		_scsih_block_io_to_pcie_children_attached_directly(
							ioc, event_data);
		return;
	}
    /* TODO We are not supporting cascaded PCIe Switch removal yet*/
	if ((event_data->SwitchStatus
		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
		(event_data->SwitchStatus ==
					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
		_scsih_block_io_to_pcie_children_attached_directly(
							ioc, event_data);

	/* NOTE(review): the remaining comparisons use SAS topology
	 * constants (MPI2_EVENT_SAS_TOPO_ES_*) against the PCIe
	 * SwitchStatus field - presumably the numeric values match the
	 * MPI26_EVENT_PCIE_TOPO_SS_* ones; confirm against the MPI spec.
	 */
	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
			fw_event->ignore)
			continue;
		local_event_data =
			(Mpi26EventDataPCIeTopologyChangeList_t *)
			fw_event->event_data;
		/* Void queued add/responding events for the switch that
		 * just disappeared.
		 */
		if (local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
				switch_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4793 
4794 /**
4795  * _scsih_set_volume_delete_flag - setting volume delete flag
4796  * @ioc: per adapter object
4797  * @handle: device handle
4798  *
4799  * This returns nothing.
4800  */
4801 static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)4802 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4803 {
4804 	struct _raid_device *raid_device;
4805 	struct MPT3SAS_TARGET *sas_target_priv_data;
4806 	unsigned long flags;
4807 
4808 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4809 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4810 	if (raid_device && raid_device->starget &&
4811 	    raid_device->starget->hostdata) {
4812 		sas_target_priv_data =
4813 		    raid_device->starget->hostdata;
4814 		sas_target_priv_data->deleted = 1;
4815 		dewtprintk(ioc,
4816 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4817 				    handle, (u64)raid_device->wwid));
4818 	}
4819 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4820 }
4821 
4822 /**
4823  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4824  * @handle: input handle
4825  * @a: handle for volume a
4826  * @b: handle for volume b
4827  *
4828  * IR firmware only supports two raid volumes.  The purpose of this
4829  * routine is to set the volume handle in either a or b. When the given
4830  * input handle is non-zero, or when a and b have not been set before.
4831  */
4832 static void
_scsih_set_volume_handle_for_tr(u16 handle,u16 * a,u16 * b)4833 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4834 {
4835 	if (!handle || handle == *a || handle == *b)
4836 		return;
4837 	if (!*a)
4838 		*a = handle;
4839 	else if (!*b)
4840 		*b = handle;
4841 }
4842 
/**
 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This routine will send target reset to volume, followed by target
 * resets to the PDs. This is called when a PD has been removed, or
 * volume has been deleted or removed. When the target reset is sent
 * to volume, the PD target resets need to be queued to start upon
 * completion of the volume target reset.
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	/* a/b: IR firmware supports at most two volumes, so two slots
	 * suffice to remember which volume handles need a reset.
	 */
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	a = 0;
	b = 0;

	/* WarpDrive hides IR from the upper layers entirely. */
	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume resets go out first ... */
	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets: sent immediately unless the PD belongs to a
	 * volume that was just reset above, in which case the reset is
	 * queued on delayed_tr_list to run after the volume reset completes.
	 */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
					    handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}
4926 
4927 
4928 /**
4929  * _scsih_check_volume_delete_events - set delete flag for volumes
4930  * @ioc: per adapter object
4931  * @event_data: the event data payload
4932  * Context: interrupt time.
4933  *
4934  * This will handle the case when the cable connected to entire volume is
4935  * pulled. We will take care of setting the deleted flag so normal IO will
4936  * not be sent.
4937  */
4938 static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrVolume_t * event_data)4939 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4940 	Mpi2EventDataIrVolume_t *event_data)
4941 {
4942 	u32 state;
4943 
4944 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4945 		return;
4946 	state = le32_to_cpu(event_data->NewValue);
4947 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4948 	    MPI2_RAID_VOL_STATE_FAILED)
4949 		_scsih_set_volume_delete_flag(ioc,
4950 		    le16_to_cpu(event_data->VolDevHandle));
4951 }
4952 
/**
 * _scsih_temp_threshold_events - display temperature threshold exceeded events
 * @ioc: per adapter object
 * @event_data: the temp threshold event data
 * Context: interrupt time.
 */
static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataTemperature_t *event_data)
{
	u32 doorbell;
	/* NOTE(review): ">=" implies SensorNum is 0-based relative to
	 * temp_sensors_count - confirm against the MPI temperature event
	 * definition.
	 */
	if (ioc->temp_sensors_count >= event_data->SensorNum) {
		/* Status bits 0-3 each flag a crossed threshold. */
		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
			event_data->SensorNum);
		ioc_err(ioc, "Current Temp In Celsius: %d\n",
			event_data->CurrentTemperature);
		/* Gen2 (MPI2) controllers don't report fault/coredump state
		 * this way; for newer parts dump the fault or coredump code
		 * if the IOC has already reacted to the thermal event.
		 */
		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_print_fault_code(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_COREDUMP) {
				mpt3sas_print_coredump_info(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			}
		}
	}
}
4987 
/*
 * _scsih_set_satl_pending - track the single in-flight ATA passthrough
 * command allowed per device (firmware SATL workaround).
 *
 * Returns non-zero only when @pending is true and another ATA_12/ATA_16
 * command is already outstanding; all other commands pass through freely.
 */
static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *dev_priv = scmd->device->hostdata;
	u8 opcode = scmd->cmnd[0];

	/* Only ATA passthrough opcodes are serialized. */
	if (opcode != ATA_12 && opcode != ATA_16)
		return 0;

	if (!pending) {
		clear_bit(0, &dev_priv->ata_command_pending);
		return 0;
	}

	/* Atomically claim the slot; non-zero means it was already taken. */
	return test_and_set_bit(0, &dev_priv->ata_command_pending);
}
5001 
/**
 * _scsih_flush_running_cmds - completing outstanding commands.
 * @ioc: per adapter object
 *
 * The flushing out of all pending scmd commands following host reset,
 * where all IO is dropped to the floor.
 */
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 smid;
	int count = 0;

	/* Walk every possible SCSI IO smid and complete whatever is
	 * still outstanding.
	 */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		count++;
		/* Release the SATL serialization slot held by this command. */
		_scsih_set_satl_pending(scmd, false);
		st = scsi_cmd_priv(scmd);
		mpt3sas_base_clear_st(ioc, st);
		scsi_dma_unmap(scmd);
		/* DID_NO_CONNECT when the host is going away (no retry);
		 * DID_RESET otherwise so the midlayer requeues the IO.
		 */
		if (ioc->pci_error_recovery || ioc->remove_host)
			scmd->result = DID_NO_CONNECT << 16;
		else
			scmd->result = DID_RESET << 16;
		scsi_done(scmd);
	}
	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}
5034 
/**
 * _scsih_setup_eedp - setup MPI request for EEDP transfer
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_request: pointer to the SCSI_IO request message frame
 *
 * Supporting protection 1 and 3.
 */
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi25SCSIIORequest_t *mpi_request)
{
	u16 eedp_flags;
	Mpi25SCSIIORequest_t *mpi_request_3v =
	   (Mpi25SCSIIORequest_t *)mpi_request;

	/* Only strip-on-read and insert-on-write are offloaded; any
	 * other protection op leaves the frame untouched.
	 */
	switch (scsi_get_prot_op(scmd)) {
	case SCSI_PROT_READ_STRIP:
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
		break;
	case SCSI_PROT_WRITE_INSERT:
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
		break;
	default:
		return;
	}

	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;

	if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;

	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;

		/* cpu_to_be32: presumably the reference tag is carried
		 * big-endian on the wire per the T10 DIF format - note
		 * this differs from the le16/le32 used elsewhere.
		 */
		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
			cpu_to_be32(scsi_prot_ref_tag(scmd));
	}

	mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));

	if (ioc->is_gen35_ioc)
		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}
5081 
5082 /**
5083  * _scsih_eedp_error_handling - return sense code for EEDP errors
5084  * @scmd: pointer to scsi command object
5085  * @ioc_status: ioc status
5086  */
5087 static void
_scsih_eedp_error_handling(struct scsi_cmnd * scmd,u16 ioc_status)5088 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5089 {
5090 	u8 ascq;
5091 
5092 	switch (ioc_status) {
5093 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5094 		ascq = 0x01;
5095 		break;
5096 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5097 		ascq = 0x02;
5098 		break;
5099 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5100 		ascq = 0x03;
5101 		break;
5102 	default:
5103 		ascq = 0x00;
5104 		break;
5105 	}
5106 	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
5107 	set_host_byte(scmd, DID_ABORT);
5108 }
5109 
5110 /**
5111  * scsih_qcmd - main scsi request entry point
5112  * @shost: SCSI host pointer
5113  * @scmd: pointer to scsi command object
5114  *
5115  * The callback index is set inside `ioc->scsi_io_cb_idx`.
5116  *
5117  * Return: 0 on success.  If there's a failure, return either:
5118  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5119  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5120  */
5121 static int
scsih_qcmd(struct Scsi_Host * shost,struct scsi_cmnd * scmd)5122 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5123 {
5124 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5125 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5126 	struct MPT3SAS_TARGET *sas_target_priv_data;
5127 	struct _raid_device *raid_device;
5128 	struct request *rq = scsi_cmd_to_rq(scmd);
5129 	int class;
5130 	Mpi25SCSIIORequest_t *mpi_request;
5131 	struct _pcie_device *pcie_device = NULL;
5132 	u32 mpi_control;
5133 	u16 smid;
5134 	u16 handle;
5135 
5136 	if (ioc->logging_level & MPT_DEBUG_SCSI)
5137 		scsi_print_command(scmd);
5138 
5139 	sas_device_priv_data = scmd->device->hostdata;
5140 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5141 		scmd->result = DID_NO_CONNECT << 16;
5142 		scsi_done(scmd);
5143 		return 0;
5144 	}
5145 
5146 	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5147 		scmd->result = DID_NO_CONNECT << 16;
5148 		scsi_done(scmd);
5149 		return 0;
5150 	}
5151 
5152 	sas_target_priv_data = sas_device_priv_data->sas_target;
5153 
5154 	/* invalid device handle */
5155 	handle = sas_target_priv_data->handle;
5156 
5157 	/*
5158 	 * Avoid error handling escallation when device is disconnected
5159 	 */
5160 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
5161 		if (scmd->device->host->shost_state == SHOST_RECOVERY &&
5162 		    scmd->cmnd[0] == TEST_UNIT_READY) {
5163 			scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
5164 			scsi_done(scmd);
5165 			return 0;
5166 		}
5167 	}
5168 
5169 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5170 		scmd->result = DID_NO_CONNECT << 16;
5171 		scsi_done(scmd);
5172 		return 0;
5173 	}
5174 
5175 
5176 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5177 		/* host recovery or link resets sent via IOCTLs */
5178 		return SCSI_MLQUEUE_HOST_BUSY;
5179 	} else if (sas_target_priv_data->deleted) {
5180 		/* device has been deleted */
5181 		scmd->result = DID_NO_CONNECT << 16;
5182 		scsi_done(scmd);
5183 		return 0;
5184 	} else if (sas_target_priv_data->tm_busy ||
5185 		   sas_device_priv_data->block) {
5186 		/* device busy with task management */
5187 		return SCSI_MLQUEUE_DEVICE_BUSY;
5188 	}
5189 
5190 	/*
5191 	 * Bug work around for firmware SATL handling.  The loop
5192 	 * is based on atomic operations and ensures consistency
5193 	 * since we're lockless at this point
5194 	 */
5195 	do {
5196 		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5197 			return SCSI_MLQUEUE_DEVICE_BUSY;
5198 	} while (_scsih_set_satl_pending(scmd, true));
5199 
5200 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5201 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
5202 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5203 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5204 	else
5205 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5206 
5207 	/* set tags */
5208 	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5209 	/* NCQ Prio supported, make sure control indicated high priority */
5210 	if (sas_device_priv_data->ncq_prio_enable) {
5211 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5212 		if (class == IOPRIO_CLASS_RT)
5213 			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5214 	}
5215 	/* Make sure Device is not raid volume.
5216 	 * We do not expose raid functionality to upper layer for warpdrive.
5217 	 */
5218 	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5219 		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
5220 		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5221 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5222 
5223 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5224 	if (!smid) {
5225 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5226 		_scsih_set_satl_pending(scmd, false);
5227 		goto out;
5228 	}
5229 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5230 	memset(mpi_request, 0, ioc->request_sz);
5231 	_scsih_setup_eedp(ioc, scmd, mpi_request);
5232 
5233 	if (scmd->cmd_len == 32)
5234 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5235 	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5236 	if (sas_device_priv_data->sas_target->flags &
5237 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
5238 		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5239 	else
5240 		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5241 	mpi_request->DevHandle = cpu_to_le16(handle);
5242 	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5243 	mpi_request->Control = cpu_to_le32(mpi_control);
5244 	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5245 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5246 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5247 	mpi_request->SenseBufferLowAddress =
5248 	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5249 	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5250 	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5251 	    mpi_request->LUN);
5252 	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5253 
5254 	if (mpi_request->DataLength) {
5255 		pcie_device = sas_target_priv_data->pcie_dev;
5256 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5257 			mpt3sas_base_free_smid(ioc, smid);
5258 			_scsih_set_satl_pending(scmd, false);
5259 			goto out;
5260 		}
5261 	} else
5262 		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5263 
5264 	raid_device = sas_target_priv_data->raid_device;
5265 	if (raid_device && raid_device->direct_io_enabled)
5266 		mpt3sas_setup_direct_io(ioc, scmd,
5267 			raid_device, mpi_request);
5268 
5269 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5270 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5271 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5272 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5273 			ioc->put_smid_fast_path(ioc, smid, handle);
5274 		} else
5275 			ioc->put_smid_scsi_io(ioc, smid,
5276 			    le16_to_cpu(mpi_request->DevHandle));
5277 	} else
5278 		ioc->put_smid_default(ioc, smid);
5279 	return 0;
5280 
5281  out:
5282 	return SCSI_MLQUEUE_HOST_BUSY;
5283 }
5284 
5285 /**
5286  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5287  * @sense_buffer: sense data returned by target
5288  * @data: normalized skey/asc/ascq
5289  */
5290 static void
_scsih_normalize_sense(char * sense_buffer,struct sense_info * data)5291 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5292 {
5293 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
5294 		/* descriptor format */
5295 		data->skey = sense_buffer[1] & 0x0F;
5296 		data->asc = sense_buffer[2];
5297 		data->ascq = sense_buffer[3];
5298 	} else {
5299 		/* fixed format */
5300 		data->skey = sense_buffer[2] & 0x0F;
5301 		data->asc = sense_buffer[12];
5302 		data->ascq = sense_buffer[13];
5303 	}
5304 }
5305 
5306 /**
/**
 * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index of the failed SCSI_IO
 *
 * Decode the firmware reply of a non-successful SCSI_IO into human
 * readable strings and print them, together with identifying
 * information about the target device, to the kernel log.
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/*
	 * Scratch buffer owned by the adapter object; the names of the set
	 * scsi_state flag bits are strcat()'ed into it further down.
	 */
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/*
	 * NOTE(review): loginfo 0x31170000 is deliberately suppressed here —
	 * presumably a known, noisy firmware event; confirm against the
	 * firmware loginfo documentation.
	 */
	if (log_info == 0x31170000)
		return;

	/* Map the IOCStatus code to a printable description. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Map the SCSI status byte to a printable description. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* Build a space-separated list of the scsi_state flag names. */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/*
	 * Print identification data appropriate for the target type:
	 * RAID volume, NVMe (PCIe) device, or plain SAS device.  The
	 * device lookups take a reference that is dropped before leaving
	 * each branch.
	 */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	/* If sense data is valid, print the normalized key/asc/ascq triple. */
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	/* Decode the response code (lowest byte of ResponseInfo) if present. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}
5517 
5518 /**
5519  * _scsih_turn_on_pfa_led - illuminate PFA LED
5520  * @ioc: per adapter object
5521  * @handle: device handle
5522  * Context: process
5523  */
5524 static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5525 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5526 {
5527 	Mpi2SepReply_t mpi_reply;
5528 	Mpi2SepRequest_t mpi_request;
5529 	struct _sas_device *sas_device;
5530 
5531 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5532 	if (!sas_device)
5533 		return;
5534 
5535 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5536 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5537 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5538 	mpi_request.SlotStatus =
5539 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5540 	mpi_request.DevHandle = cpu_to_le16(handle);
5541 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5542 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5543 	    &mpi_request)) != 0) {
5544 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5545 			__FILE__, __LINE__, __func__);
5546 		goto out;
5547 	}
5548 	sas_device->pfa_led_on = 1;
5549 
5550 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5551 		dewtprintk(ioc,
5552 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5553 				    le16_to_cpu(mpi_reply.IOCStatus),
5554 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5555 		goto out;
5556 	}
5557 out:
5558 	sas_device_put(sas_device);
5559 }
5560 
5561 /**
5562  * _scsih_turn_off_pfa_led - turn off Fault LED
5563  * @ioc: per adapter object
5564  * @sas_device: sas device whose PFA LED has to turned off
5565  * Context: process
5566  */
5567 static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)5568 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5569 	struct _sas_device *sas_device)
5570 {
5571 	Mpi2SepReply_t mpi_reply;
5572 	Mpi2SepRequest_t mpi_request;
5573 
5574 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5575 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5576 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5577 	mpi_request.SlotStatus = 0;
5578 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5579 	mpi_request.DevHandle = 0;
5580 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5581 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5582 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5583 		&mpi_request)) != 0) {
5584 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5585 			__FILE__, __LINE__, __func__);
5586 		return;
5587 	}
5588 
5589 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5590 		dewtprintk(ioc,
5591 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5592 				    le16_to_cpu(mpi_reply.IOCStatus),
5593 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5594 		return;
5595 	}
5596 }
5597 
5598 /**
5599  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5600  * @ioc: per adapter object
5601  * @handle: device handle
5602  * Context: interrupt.
5603  */
5604 static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5605 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5606 {
5607 	struct fw_event_work *fw_event;
5608 
5609 	fw_event = alloc_fw_event_work(0);
5610 	if (!fw_event)
5611 		return;
5612 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5613 	fw_event->device_handle = handle;
5614 	fw_event->ioc = ioc;
5615 	_scsih_fw_event_add(ioc, fw_event);
5616 	fw_event_work_put(fw_event);
5617 }
5618 
/**
 * _scsih_smart_predicted_fault - process smart errors
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 *
 * Called when a device reports a SMART predicted-fault condition
 * (sense asc 0x5D).  For plain SAS devices this logs the enclosure
 * information, optionally schedules lighting the PFA LED, and injects
 * a synthetic SAS_DEVICE_STATUS_CHANGE event into the ioctl event log.
 * RAID components and volumes are ignored.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	/* Lock no longer needed; the sas_device reference keeps it alive. */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * NOTE(review): the PFA LED is only driven on IBM-branded adapters
	 * here — presumably a platform requirement; confirm.
	 */
	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* GFP_ATOMIC: this runs in interrupt context (see Context above). */
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	/* asc 0x5D == failure prediction threshold exceeded */
	event_data->ASC = 0x5D;
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
out:
	/* sas_device may be NULL when arriving via out_unlock. */
	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}
5689 
/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when using _scsih_qcmd.
 *
 * Translates the firmware's IOCStatus/SCSIStatus/SCSIState triple into
 * a Linux scsi_cmnd result (DID_* host byte plus SCSI status byte),
 * copies sense data, and completes the command.  A failed WarpDrive
 * direct I/O is resubmitted to the volume instead of being completed.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	/* No command tracked for this smid: let the caller free the mf. */
	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* No reply frame address means unqualified success. */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	     ((ioc_status & MPI2_IOCSTATUS_MASK)
	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		st->direct_io = 0;
		st->scmd = scmd;
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		/* Resubmitted to the volume; the smid stays in use. */
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* One-shot check per device (tlr_snoop_check counts attempts). */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/*
	 * A zero-transfer underrun with a busy-class SCSI status is not a
	 * data error; treat it as success so the status byte is reported.
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	/* Translate ioc_status into a host byte / SCSI status for scmd. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		if (log_info == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
			/*
			 * This is a ATA NCQ command aborted due to another NCQ
			 * command failure. We must retry this command
			 * immediately but without incrementing its retry
			 * counter.
			 */
			WARN_ON_ONCE(xfer_cnt != 0);
			scmd->result = DID_IMM_RETRY << 16;
			break;
		}
		/*
		 * NOTE(review): 0x31110630 is treated as a retryable loginfo
		 * until the command has been retried twice; confirm meaning
		 * against the firmware loginfo documentation.
		 */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				    SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		/* Valid sense data already explains the shortfall. */
		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/*
			 * REPORT LUNS returned no data: synthesize a check
			 * condition (illegal request, asc 0x20) so the
			 * midlayer falls back to sequential LUN scanning.
			 */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
					 0x20, 0);
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	/* Completion path: unmap DMA, release the smid, finish the command. */
	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scsi_done(scmd);
	return 0;
}
5939 
/**
 * _scsih_update_vphys_after_reset - update the Port's
 *			vphys_list after reset
 * @ioc: per adapter object
 *
 * After a host reset, re-reads the HBA phy data and re-associates each
 * surviving virtual phy (vSES) with the hba_port that now owns it,
 * moving virtual_phy objects between ports when their Port ID changed.
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is conformed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equals
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
						&ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirt.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
				 */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			if (found)
				break;
		}
	}
out:
	kfree(sas_iounit_pg0);
}
6136 
6137 /**
6138  * _scsih_get_port_table_after_reset - Construct temporary port table
6139  * @ioc: per adapter object
6140  * @port_table: address where port table needs to be constructed
6141  *
6142  * return number of HBA port entries available after reset.
6143  */
6144 static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table)6145 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6146 	struct hba_port *port_table)
6147 {
6148 	u16 sz, ioc_status;
6149 	int i, j;
6150 	Mpi2ConfigReply_t mpi_reply;
6151 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6152 	u16 attached_handle;
6153 	u64 attached_sas_addr;
6154 	u8 found = 0, port_count = 0, port_id;
6155 
6156 	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
6157 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6158 	if (!sas_iounit_pg0) {
6159 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6160 		    __FILE__, __LINE__, __func__);
6161 		return port_count;
6162 	}
6163 
6164 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6165 	    sas_iounit_pg0, sz)) != 0)
6166 		goto out;
6167 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6168 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6169 		goto out;
6170 	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6171 		found = 0;
6172 		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6173 		    MPI2_SAS_NEG_LINK_RATE_1_5)
6174 			continue;
6175 		attached_handle =
6176 		    le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6177 		if (_scsih_get_sas_address(
6178 		    ioc, attached_handle, &attached_sas_addr) != 0) {
6179 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6180 			    __FILE__, __LINE__, __func__);
6181 			continue;
6182 		}
6183 
6184 		for (j = 0; j < port_count; j++) {
6185 			port_id = sas_iounit_pg0->PhyData[i].Port;
6186 			if (port_table[j].port_id == port_id &&
6187 			    port_table[j].sas_address == attached_sas_addr) {
6188 				port_table[j].phy_mask |= (1 << i);
6189 				found = 1;
6190 				break;
6191 			}
6192 		}
6193 
6194 		if (found)
6195 			continue;
6196 
6197 		port_id = sas_iounit_pg0->PhyData[i].Port;
6198 		port_table[port_count].port_id = port_id;
6199 		port_table[port_count].phy_mask = (1 << i);
6200 		port_table[port_count].sas_address = attached_sas_addr;
6201 		port_count++;
6202 	}
6203 out:
6204 	kfree(sas_iounit_pg0);
6205 	return port_count;
6206 }
6207 
/*
 * Result of matching a post-reset port entry against the existing HBA
 * port table (see _scsih_look_and_get_matched_port_entry); ordered from
 * no match to progressively weaker matches.
 */
enum hba_port_matched_codes {
	NOT_MATCHED = 0,			/* no usable entry found */
	MATCHED_WITH_ADDR_AND_PHYMASK,		/* SAS address + exact phy mask */
	MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,	/* address + partial mask + port id */
	MATCHED_WITH_ADDR_AND_SUBPHYMASK,	/* address + partial phy mask */
	MATCHED_WITH_ADDR,			/* SAS address only */
};
6215 
/**
 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
 *					from HBA port table
 * @ioc: per adapter object
 * @port_entry: hba port entry from temporary port table which needs to be
 *		searched for matched entry in the HBA port table
 * @matched_port_entry: save matched hba port entry here
 * @count: count of matched entries
 *
 * Scans only entries marked HBA_PORT_FLAG_DIRTY_PORT and keeps the best
 * match found so far; an exact address+phymask match terminates the scan
 * immediately, while the weaker match kinds only replace a previously
 * recorded weaker (or equal) match.  @count is written only for the
 * weakest (address-only) match kind, where several candidates may tie.
 *
 * return type of matched entry found.
 */
static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_entry,
	struct hba_port **matched_port_entry, int *count)
{
	struct hba_port *port_table_entry, *matched_port = NULL;
	enum hba_port_matched_codes matched_code = NOT_MATCHED;
	int lcount = 0;
	*matched_port_entry = NULL;

	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
		/* Only dirty (not-yet-reclaimed) entries are candidates. */
		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
			continue;

		/* Strongest match: same address and identical phy mask. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
			matched_port = port_table_entry;
			break;
		}

		/* Same address, overlapping phy mask, and same port id. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)
		    && (port_table_entry->port_id == port_entry->port_id)) {
			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
			matched_port = port_table_entry;
			continue;
		}

		/* Same address and overlapping phy mask (port id differs). */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
			/* Never downgrade a stronger match already found. */
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
			matched_port = port_table_entry;
			continue;
		}

		/* Weakest match: only the SAS address agrees. */
		if (port_table_entry->sas_address == port_entry->sas_address) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
				continue;
			matched_code = MATCHED_WITH_ADDR;
			matched_port = port_table_entry;
			/* Several entries may tie on address alone. */
			lcount++;
		}
	}

	*matched_port_entry = matched_port;
	if (matched_code ==  MATCHED_WITH_ADDR)
		*count = lcount;
	return matched_code;
}
6283 
6284 /**
6285  * _scsih_del_phy_part_of_anther_port - remove phy if it
 *				is a part of another port
6287  *@ioc: per adapter object
6288  *@port_table: port table after reset
6289  *@index: hba port entry index
6290  *@port_count: number of ports available after host reset
6291  *@offset: HBA phy bit offset
6292  *
6293  */
6294 static void
_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table,int index,u8 port_count,int offset)6295 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6296 	struct hba_port *port_table,
6297 	int index, u8 port_count, int offset)
6298 {
6299 	struct _sas_node *sas_node = &ioc->sas_hba;
6300 	u32 i, found = 0;
6301 
6302 	for (i = 0; i < port_count; i++) {
6303 		if (i == index)
6304 			continue;
6305 
6306 		if (port_table[i].phy_mask & (1 << offset)) {
6307 			mpt3sas_transport_del_phy_from_an_existing_port(
6308 			    ioc, sas_node, &sas_node->phy[offset]);
6309 			found = 1;
6310 			break;
6311 		}
6312 	}
6313 	if (!found)
6314 		port_table[index].phy_mask |= (1 << offset);
6315 }
6316 
6317 /**
6318  * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6319  *						right port
6320  *@ioc: per adapter object
6321  *@hba_port_entry: hba port table entry
6322  *@port_table: temporary port table
6323  *@index: hba port entry index
6324  *@port_count: number of ports available after host reset
6325  *
6326  */
6327 static void
_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * hba_port_entry,struct hba_port * port_table,int index,int port_count)6328 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6329 	struct hba_port *hba_port_entry, struct hba_port *port_table,
6330 	int index, int port_count)
6331 {
6332 	u32 phy_mask, offset = 0;
6333 	struct _sas_node *sas_node = &ioc->sas_hba;
6334 
6335 	phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6336 
6337 	for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6338 		if (phy_mask & (1 << offset)) {
6339 			if (!(port_table[index].phy_mask & (1 << offset))) {
6340 				_scsih_del_phy_part_of_anther_port(
6341 				    ioc, port_table, index, port_count,
6342 				    offset);
6343 				continue;
6344 			}
6345 			if (sas_node->phy[offset].phy_belongs_to_port)
6346 				mpt3sas_transport_del_phy_from_an_existing_port(
6347 				    ioc, sas_node, &sas_node->phy[offset]);
6348 			mpt3sas_transport_add_phy_to_an_existing_port(
6349 			    ioc, sas_node, &sas_node->phy[offset],
6350 			    hba_port_entry->sas_address,
6351 			    hba_port_entry);
6352 		}
6353 	}
6354 }
6355 
6356 /**
6357  * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6358  * @ioc: per adapter object
6359  *
6360  * Returns nothing.
6361  */
6362 static void
_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER * ioc)6363 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6364 {
6365 	struct hba_port *port, *port_next;
6366 	struct virtual_phy *vphy, *vphy_next;
6367 
6368 	list_for_each_entry_safe(port, port_next,
6369 	    &ioc->port_table_list, list) {
6370 		if (!port->vphys_mask)
6371 			continue;
6372 		list_for_each_entry_safe(vphy, vphy_next,
6373 		    &port->vphys_list, list) {
6374 			if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6375 				drsprintk(ioc, ioc_info(ioc,
6376 				    "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6377 				    vphy, port->port_id,
6378 				    vphy->phy_mask));
6379 				port->vphys_mask &= ~vphy->phy_mask;
6380 				list_del(&vphy->list);
6381 				kfree(vphy);
6382 			}
6383 		}
6384 		if (!port->vphys_mask && !port->sas_address)
6385 			port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6386 	}
6387 }
6388 
6389 /**
6390  * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6391  *					after host reset
6392  *@ioc: per adapter object
6393  *
6394  */
6395 static void
_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER * ioc)6396 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6397 {
6398 	struct hba_port *port, *port_next;
6399 
6400 	list_for_each_entry_safe(port, port_next,
6401 	    &ioc->port_table_list, list) {
6402 		if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6403 		    port->flags & HBA_PORT_FLAG_NEW_PORT)
6404 			continue;
6405 
6406 		drsprintk(ioc, ioc_info(ioc,
6407 		    "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6408 		    port, port->port_id, port->phy_mask));
6409 		list_del(&port->list);
6410 		kfree(port);
6411 	}
6412 }
6413 
6414 /**
6415  * _scsih_sas_port_refresh - Update HBA port table after host reset
6416  * @ioc: per adapter object
6417  */
6418 static void
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER * ioc)6419 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6420 {
6421 	u32 port_count = 0;
6422 	struct hba_port *port_table;
6423 	struct hba_port *port_table_entry;
6424 	struct hba_port *port_entry = NULL;
6425 	int i, j, count = 0, lcount = 0;
6426 	int ret;
6427 	u64 sas_addr;
6428 	u8 num_phys;
6429 
6430 	drsprintk(ioc, ioc_info(ioc,
6431 	    "updating ports for sas_host(0x%016llx)\n",
6432 	    (unsigned long long)ioc->sas_hba.sas_address));
6433 
6434 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6435 	if (!num_phys) {
6436 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6437 		    __FILE__, __LINE__, __func__);
6438 		return;
6439 	}
6440 
6441 	if (num_phys > ioc->sas_hba.nr_phys_allocated) {
6442 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6443 		   __FILE__, __LINE__, __func__);
6444 		return;
6445 	}
6446 	ioc->sas_hba.num_phys = num_phys;
6447 
6448 	port_table = kcalloc(ioc->sas_hba.num_phys,
6449 	    sizeof(struct hba_port), GFP_KERNEL);
6450 	if (!port_table)
6451 		return;
6452 
6453 	port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6454 	if (!port_count)
6455 		return;
6456 
6457 	drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6458 	for (j = 0; j < port_count; j++)
6459 		drsprintk(ioc, ioc_info(ioc,
6460 		    "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6461 		    port_table[j].port_id,
6462 		    port_table[j].phy_mask, port_table[j].sas_address));
6463 
6464 	list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6465 		port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6466 
6467 	drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6468 	port_table_entry = NULL;
6469 	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6470 		drsprintk(ioc, ioc_info(ioc,
6471 		    "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6472 		    port_table_entry->port_id,
6473 		    port_table_entry->phy_mask,
6474 		    port_table_entry->sas_address));
6475 	}
6476 
6477 	for (j = 0; j < port_count; j++) {
6478 		ret = _scsih_look_and_get_matched_port_entry(ioc,
6479 		    &port_table[j], &port_entry, &count);
6480 		if (!port_entry) {
6481 			drsprintk(ioc, ioc_info(ioc,
6482 			    "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6483 			    port_table[j].sas_address,
6484 			    port_table[j].port_id));
6485 			continue;
6486 		}
6487 
6488 		switch (ret) {
6489 		case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6490 		case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6491 			_scsih_add_or_del_phys_from_existing_port(ioc,
6492 			    port_entry, port_table, j, port_count);
6493 			break;
6494 		case MATCHED_WITH_ADDR:
6495 			sas_addr = port_table[j].sas_address;
6496 			for (i = 0; i < port_count; i++) {
6497 				if (port_table[i].sas_address == sas_addr)
6498 					lcount++;
6499 			}
6500 
6501 			if (count > 1 || lcount > 1)
6502 				port_entry = NULL;
6503 			else
6504 				_scsih_add_or_del_phys_from_existing_port(ioc,
6505 				    port_entry, port_table, j, port_count);
6506 		}
6507 
6508 		if (!port_entry)
6509 			continue;
6510 
6511 		if (port_entry->port_id != port_table[j].port_id)
6512 			port_entry->port_id = port_table[j].port_id;
6513 		port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6514 		port_entry->phy_mask = port_table[j].phy_mask;
6515 	}
6516 
6517 	port_table_entry = NULL;
6518 }
6519 
6520 /**
6521  * _scsih_alloc_vphy - allocate virtual_phy object
6522  * @ioc: per adapter object
6523  * @port_id: Port ID number
6524  * @phy_num: HBA Phy number
6525  *
6526  * Returns allocated virtual_phy object.
6527  */
6528 static struct virtual_phy *
_scsih_alloc_vphy(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 phy_num)6529 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6530 {
6531 	struct virtual_phy *vphy;
6532 	struct hba_port *port;
6533 
6534 	port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6535 	if (!port)
6536 		return NULL;
6537 
6538 	vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6539 	if (!vphy) {
6540 		vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6541 		if (!vphy)
6542 			return NULL;
6543 
6544 		if (!port->vphys_mask)
6545 			INIT_LIST_HEAD(&port->vphys_list);
6546 
6547 		/*
6548 		 * Enable bit corresponding to HBA phy number on its
6549 		 * parent hba_port object's vphys_mask field.
6550 		 */
6551 		port->vphys_mask |= (1 << phy_num);
6552 		vphy->phy_mask |= (1 << phy_num);
6553 
6554 		list_add_tail(&vphy->list, &port->vphys_list);
6555 
6556 		ioc_info(ioc,
6557 		    "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6558 		    vphy, port->port_id, phy_num);
6559 	}
6560 	return vphy;
6561 }
6562 
6563 /**
6564  * _scsih_sas_host_refresh - refreshing sas host object contents
6565  * @ioc: per adapter object
6566  * Context: user
6567  *
 * During port enable, fw will send topology events for every device. It's
 * possible that the handles may change from the previous setting, so this
 * code keeps the handles updated when they change.
6571  */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate, port_id;
	struct hba_port *port;
	Mpi2SasPhyPage0_t phy_pg0;

	dtmprintk(ioc,
		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
			   (u64)ioc->sas_hba.sas_address));

	/* Read SAS IO unit page 0, which carries per-phy state for every
	 * HBA phy (negotiated link rate, attached handle, port number).
	 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		/* Upper nibble of NegotiatedLinkRate is the current rate. */
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(
			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
		/* Create an hba_port object for any port id seen for the
		 * first time.
		 */
		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			/* Ports discovered during recovery are flagged so
			 * that dirty-port cleanup leaves them alone.
			 */
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >=  MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__);
				goto out;
			}
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate a virtual_phy object for vSES device, if
			 * this vSES device is hot added.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		/*
		 * Add new HBA phys to STL if these new phys got added as part
		 * of HBA Firmware upgrade/downgrade operation.
		 */
		if (!ioc->sas_hba.phy[i].phy) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
							&phy_pg0, i))) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
					__FILE__, __LINE__, __func__);
				continue;
			}
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
				MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
					__FILE__, __LINE__, __func__);
				continue;
			}
			ioc->sas_hba.phy[i].phy_id = i;
			mpt3sas_transport_add_host_phy(ioc,
				&ioc->sas_hba.phy[i], phy_pg0,
				ioc->sas_hba.parent_dev);
			continue;
		}
		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		/* Report at least 1.5 Gbps for any phy with an attached
		 * device so the transport layer treats the link as up.
		 */
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
	/*
	 * Clear the phy details if this phy got disabled as part of
	 * HBA Firmware upgrade/downgrade operation.
	 */
	for (i = ioc->sas_hba.num_phys;
	     i < ioc->sas_hba.nr_phys_allocated; i++) {
		if (ioc->sas_hba.phy[i].phy &&
		    ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
		    SAS_LINK_RATE_1_5_GBPS)
			mpt3sas_transport_update_links(ioc,
				ioc->sas_hba.sas_address, 0, i,
				MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
	}
 out:
	kfree(sas_iounit_pg0);
}
6698 
6699 /**
6700  * _scsih_sas_host_add - create sas host object
6701  * @ioc: per adapter object
6702  *
6703  * Creating host side data object, stored in ioc->sas_hba
6704  */
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2SasPhyPage0_t phy_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	u16 ioc_status;
	u16 sz;
	u8 device_missing_delay;
	u8 num_phys, port_id;
	struct hba_port *port;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* Size the phy array for at least MPT_MAX_HBA_NUM_PHYS so a later
	 * firmware change can expose more phys without reallocation.
	 */
	ioc->sas_hba.nr_phys_allocated = max_t(u8,
	    MPT_MAX_HBA_NUM_PHYS, num_phys);
	ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!ioc->sas_hba.phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.num_phys = num_phys;

	/* sas_iounit page 0 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* sas_iounit page 1 */
	sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	ioc->io_missing_delay =
	    sas_iounit_pg1->IODeviceMissingDelay;
	device_missing_delay =
	    sas_iounit_pg1->ReportDeviceMissingDelay;
	/* The delay is encoded in units of either 16 seconds or 1 second,
	 * selected by the UNIT_16 flag bit.
	 */
	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		ioc->device_missing_delay = (device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		ioc->device_missing_delay = device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
	/* Register each HBA phy with the SAS transport layer, creating
	 * hba_port objects for any port ids seen for the first time.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			goto out;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			goto out;
		}

		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);

		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			   "hba_port entry: %p, port: %d is added to hba_port list\n",
			   port, port->port_id);
			list_add_tail(&port->list,
			    &ioc->port_table_list);
		}

		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if ((le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
		    (phy_pg0.NegotiatedLinkRate >> 4) >=
		    MPI2_SAS_NEG_LINK_RATE_1_5) {
			/*
			 * Allocate a virtual_phy object for vSES device.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		ioc->sas_hba.phy[i].phy_id = i;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
		    phy_pg0, ioc->sas_hba.parent_dev);
	}
	/* Look up the controller's own device page to learn its SAS
	 * address and enclosure handle.
	 */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
		 ioc->sas_hba.handle,
		 (u64)ioc->sas_hba.sas_address,
		 ioc->sas_hba.num_phys);

	if (ioc->sas_hba.enclosure_handle) {
		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
		   ioc->sas_hba.enclosure_handle)))
			ioc->sas_hba.enclosure_logical_id =
			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
	}

 out:
	kfree(sas_iounit_pg1);
	kfree(sas_iounit_pg0);
}
6876 
6877 /**
6878  * _scsih_expander_add -  creating expander object
6879  * @ioc: per adapter object
6880  * @handle: expander handle
6881  *
6882  * Creating expander object, stored in ioc->sas_expander_list.
6883  *
6884  * Return: 0 for success, else error.
6885  */
static int
_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_node *sas_expander;
	struct _enclosure_node *enclosure_dev;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2ExpanderPage1_t expander_pg1;
	u32 ioc_status;
	u16 parent_handle;
	u64 sas_address, sas_address_parent = 0;
	int i;
	unsigned long flags;
	struct _sas_port *mpt3sas_port = NULL;
	u8 port_id;

	int rc = 0;

	if (!handle)
		return -1;

	/* Do not touch topology while host or PCI recovery is in flight. */
	if (ioc->shost_recovery || ioc->pci_error_recovery)
		return -1;

	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -1;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -1;
	}

	/* handle out of order topology events */
	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
	    != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -1;
	}

	/* If the parent expander is not known yet, add it first
	 * (recursively) so the topology is built top-down.
	 */
	port_id = expander_pg0.PhysicalPort;
	if (sas_address_parent != ioc->sas_hba.sas_address) {
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
		    sas_address_parent,
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		if (!sas_expander) {
			rc = _scsih_expander_add(ioc, parent_handle);
			if (rc != 0)
				return rc;
		}
	}

	/* Nothing to do if this expander is already known. */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_address = le64_to_cpu(expander_pg0.SASAddress);
	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	if (sas_expander)
		return 0;

	sas_expander = kzalloc(sizeof(struct _sas_node),
	    GFP_KERNEL);
	if (!sas_expander) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -1;
	}

	sas_expander->handle = handle;
	sas_expander->num_phys = expander_pg0.NumPhys;
	sas_expander->sas_address_parent = sas_address_parent;
	sas_expander->sas_address = sas_address;
	sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
	if (!sas_expander->port) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		rc = -1;
		goto out_fail;
	}

	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
		 handle, parent_handle,
		 (u64)sas_expander->sas_address, sas_expander->num_phys);

	if (!sas_expander->num_phys) {
		rc = -1;
		goto out_fail;
	}
	sas_expander->phy = kcalloc(sas_expander->num_phys,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!sas_expander->phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		rc = -1;
		goto out_fail;
	}

	INIT_LIST_HEAD(&sas_expander->sas_port_list);
	/* Register the expander's upstream port with the SAS transport. */
	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
	    sas_address_parent, sas_expander->port);
	if (!mpt3sas_port) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		rc = -1;
		goto out_fail;
	}
	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
	sas_expander->rphy = mpt3sas_port->rphy;

	/* Register every expander phy with the SAS transport layer. */
	for (i = 0 ; i < sas_expander->num_phys ; i++) {
		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
		    &expander_pg1, i, handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			rc = -1;
			goto out_fail;
		}
		sas_expander->phy[i].handle = handle;
		sas_expander->phy[i].phy_id = i;
		sas_expander->phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);

		if ((mpt3sas_transport_add_expander_phy(ioc,
		    &sas_expander->phy[i], expander_pg1,
		    sas_expander->parent_dev))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			rc = -1;
			goto out_fail;
		}
	}

	/* NOTE(review): enclosure_handle is never assigned in this function
	 * (sas_expander was kzalloc'ed above), so this branch looks dead
	 * unless the field is set elsewhere — confirm against full driver.
	 */
	if (sas_expander->enclosure_handle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
						sas_expander->enclosure_handle);
		if (enclosure_dev)
			sas_expander->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
	}

	_scsih_expander_node_add(ioc, sas_expander);
	return 0;

 out_fail:

	if (mpt3sas_port)
		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
		    sas_address_parent, sas_expander->port);
	kfree(sas_expander);
	return rc;
}
7049 
7050 /**
7051  * mpt3sas_expander_remove - removing expander object
7052  * @ioc: per adapter object
7053  * @sas_address: expander sas_address
7054  * @port: hba port entry
7055  */
7056 void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)7057 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7058 	struct hba_port *port)
7059 {
7060 	struct _sas_node *sas_expander;
7061 	unsigned long flags;
7062 
7063 	if (ioc->shost_recovery)
7064 		return;
7065 
7066 	if (!port)
7067 		return;
7068 
7069 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
7070 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7071 	    sas_address, port);
7072 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7073 	if (sas_expander)
7074 		_scsih_expander_node_remove(ioc, sas_expander);
7075 }
7076 
7077 /**
7078  * _scsih_done -  internal SCSI_IO callback handler.
7079  * @ioc: per adapter object
7080  * @smid: system request message index
7081  * @msix_index: MSIX table index supplied by the OS
7082  * @reply: reply message frame(lower 32bit addr)
7083  *
7084  * Callback handler when sending internal generated SCSI_IO.
7085  * The callback index passed is `ioc->scsih_cb_idx`
7086  *
7087  * Return: 1 meaning mf should be freed from _base_interrupt
7088  *         0 means the mf is freed from this function.
7089  */
7090 static u8
_scsih_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)7091 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7092 {
7093 	MPI2DefaultReply_t *mpi_reply;
7094 
7095 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
7096 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7097 		return 1;
7098 	if (ioc->scsih_cmds.smid != smid)
7099 		return 1;
7100 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7101 	if (mpi_reply) {
7102 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
7103 		    mpi_reply->MsgLength*4);
7104 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7105 	}
7106 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7107 	complete(&ioc->scsih_cmds.done);
7108 	return 1;
7109 }
7110 
7111 
7112 
7113 
/* Upper bound on LUN numbers — presumably per target; confirm at call sites. */
#define MPT3_MAX_LUNS (255)
7115 
7116 
7117 /**
7118  * _scsih_check_access_status - check access flags
7119  * @ioc: per adapter object
7120  * @sas_address: sas address
7121  * @handle: sas device handle
7122  * @access_status: errors returned during discovery of the device
7123  *
7124  * Return: 0 for success, else failure
7125  */
7126 static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,u16 handle,u8 access_status)7127 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7128 	u16 handle, u8 access_status)
7129 {
7130 	u8 rc = 1;
7131 	char *desc = NULL;
7132 
7133 	switch (access_status) {
7134 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7135 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7136 		rc = 0;
7137 		break;
7138 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7139 		desc = "sata capability failed";
7140 		break;
7141 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7142 		desc = "sata affiliation conflict";
7143 		break;
7144 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7145 		desc = "route not addressable";
7146 		break;
7147 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7148 		desc = "smp error not addressable";
7149 		break;
7150 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7151 		desc = "device blocked";
7152 		break;
7153 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7154 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7155 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7156 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7157 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7158 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7159 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7160 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7161 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7162 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7163 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7164 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7165 		desc = "sata initialization failed";
7166 		break;
7167 	default:
7168 		desc = "unknown";
7169 		break;
7170 	}
7171 
7172 	if (!rc)
7173 		return 0;
7174 
7175 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7176 		desc, (u64)sas_address, handle);
7177 	return rc;
7178 }
7179 
7180 /**
7181  * _scsih_check_device - checking device responsiveness
7182  * @ioc: per adapter object
7183  * @parent_sas_address: sas address of parent expander or sas host
7184  * @handle: attached device handle
7185  * @phy_number: phy number
7186  * @link_rate: new link rate
7187  */
7188 static void
_scsih_check_device(struct MPT3SAS_ADAPTER * ioc,u64 parent_sas_address,u16 handle,u8 phy_number,u8 link_rate)7189 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7190 	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7191 {
7192 	Mpi2ConfigReply_t mpi_reply;
7193 	Mpi2SasDevicePage0_t sas_device_pg0;
7194 	struct _sas_device *sas_device = NULL;
7195 	struct _enclosure_node *enclosure_dev = NULL;
7196 	u32 ioc_status;
7197 	unsigned long flags;
7198 	u64 sas_address;
7199 	struct scsi_target *starget;
7200 	struct MPT3SAS_TARGET *sas_target_priv_data;
7201 	u32 device_info;
7202 	struct hba_port *port;
7203 
7204 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7205 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7206 		return;
7207 
7208 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7209 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7210 		return;
7211 
7212 	/* wide port handling ~ we need only handle device once for the phy that
7213 	 * is matched in sas device page zero
7214 	 */
7215 	if (phy_number != sas_device_pg0.PhyNum)
7216 		return;
7217 
7218 	/* check if this is end device */
7219 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7220 	if (!(_scsih_is_end_device(device_info)))
7221 		return;
7222 
7223 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7224 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7225 	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7226 	if (!port)
7227 		goto out_unlock;
7228 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7229 	    sas_address, port);
7230 
7231 	if (!sas_device)
7232 		goto out_unlock;
7233 
7234 	if (unlikely(sas_device->handle != handle)) {
7235 		starget = sas_device->starget;
7236 		sas_target_priv_data = starget->hostdata;
7237 		starget_printk(KERN_INFO, starget,
7238 			"handle changed from(0x%04x) to (0x%04x)!!!\n",
7239 			sas_device->handle, handle);
7240 		sas_target_priv_data->handle = handle;
7241 		sas_device->handle = handle;
7242 		if (le16_to_cpu(sas_device_pg0.Flags) &
7243 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7244 			sas_device->enclosure_level =
7245 				sas_device_pg0.EnclosureLevel;
7246 			memcpy(sas_device->connector_name,
7247 				sas_device_pg0.ConnectorName, 4);
7248 			sas_device->connector_name[4] = '\0';
7249 		} else {
7250 			sas_device->enclosure_level = 0;
7251 			sas_device->connector_name[0] = '\0';
7252 		}
7253 
7254 		sas_device->enclosure_handle =
7255 				le16_to_cpu(sas_device_pg0.EnclosureHandle);
7256 		sas_device->is_chassis_slot_valid = 0;
7257 		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7258 						sas_device->enclosure_handle);
7259 		if (enclosure_dev) {
7260 			sas_device->enclosure_logical_id =
7261 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7262 			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7263 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7264 				sas_device->is_chassis_slot_valid = 1;
7265 				sas_device->chassis_slot =
7266 					enclosure_dev->pg0.ChassisSlot;
7267 			}
7268 		}
7269 	}
7270 
7271 	/* check if device is present */
7272 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
7273 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7274 		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7275 			handle);
7276 		goto out_unlock;
7277 	}
7278 
7279 	/* check if there were any issues with discovery */
7280 	if (_scsih_check_access_status(ioc, sas_address, handle,
7281 	    sas_device_pg0.AccessStatus))
7282 		goto out_unlock;
7283 
7284 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7285 	_scsih_ublock_io_device(ioc, sas_address, port);
7286 
7287 	if (sas_device)
7288 		sas_device_put(sas_device);
7289 	return;
7290 
7291 out_unlock:
7292 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7293 	if (sas_device)
7294 		sas_device_put(sas_device);
7295 }
7296 
7297 /**
7298  * _scsih_add_device -  creating sas device object
7299  * @ioc: per adapter object
7300  * @handle: sas device handle
7301  * @phy_num: phy number end device attached to
7302  * @is_pd: is this hidden raid component
7303  *
7304  * Creating end device object, stored in ioc->sas_device_list.
7305  *
7306  * Return: 0 for success, non-zero for failure.
7307  */
7308 static int
_scsih_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle,u8 phy_num,u8 is_pd)7309 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7310 	u8 is_pd)
7311 {
7312 	Mpi2ConfigReply_t mpi_reply;
7313 	Mpi2SasDevicePage0_t sas_device_pg0;
7314 	struct _sas_device *sas_device;
7315 	struct _enclosure_node *enclosure_dev = NULL;
7316 	u32 ioc_status;
7317 	u64 sas_address;
7318 	u32 device_info;
7319 	u8 port_id;
7320 
7321 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7322 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7323 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7324 			__FILE__, __LINE__, __func__);
7325 		return -1;
7326 	}
7327 
7328 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7329 	    MPI2_IOCSTATUS_MASK;
7330 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7331 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7332 			__FILE__, __LINE__, __func__);
7333 		return -1;
7334 	}
7335 
7336 	/* check if this is end device */
7337 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7338 	if (!(_scsih_is_end_device(device_info)))
7339 		return -1;
7340 	set_bit(handle, ioc->pend_os_device_add);
7341 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7342 
7343 	/* check if device is present */
7344 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
7345 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7346 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7347 			handle);
7348 		return -1;
7349 	}
7350 
7351 	/* check if there were any issues with discovery */
7352 	if (_scsih_check_access_status(ioc, sas_address, handle,
7353 	    sas_device_pg0.AccessStatus))
7354 		return -1;
7355 
7356 	port_id = sas_device_pg0.PhysicalPort;
7357 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
7358 	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7359 	if (sas_device) {
7360 		clear_bit(handle, ioc->pend_os_device_add);
7361 		sas_device_put(sas_device);
7362 		return -1;
7363 	}
7364 
7365 	if (sas_device_pg0.EnclosureHandle) {
7366 		enclosure_dev =
7367 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7368 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
7369 		if (enclosure_dev == NULL)
7370 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7371 				 sas_device_pg0.EnclosureHandle);
7372 	}
7373 
7374 	sas_device = kzalloc(sizeof(struct _sas_device),
7375 	    GFP_KERNEL);
7376 	if (!sas_device) {
7377 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7378 			__FILE__, __LINE__, __func__);
7379 		return 0;
7380 	}
7381 
7382 	kref_init(&sas_device->refcount);
7383 	sas_device->handle = handle;
7384 	if (_scsih_get_sas_address(ioc,
7385 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
7386 	    &sas_device->sas_address_parent) != 0)
7387 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7388 			__FILE__, __LINE__, __func__);
7389 	sas_device->enclosure_handle =
7390 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
7391 	if (sas_device->enclosure_handle != 0)
7392 		sas_device->slot =
7393 		    le16_to_cpu(sas_device_pg0.Slot);
7394 	sas_device->device_info = device_info;
7395 	sas_device->sas_address = sas_address;
7396 	sas_device->phy = sas_device_pg0.PhyNum;
7397 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7398 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7399 	sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7400 	if (!sas_device->port) {
7401 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7402 		    __FILE__, __LINE__, __func__);
7403 		goto out;
7404 	}
7405 
7406 	if (le16_to_cpu(sas_device_pg0.Flags)
7407 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7408 		sas_device->enclosure_level =
7409 			sas_device_pg0.EnclosureLevel;
7410 		memcpy(sas_device->connector_name,
7411 			sas_device_pg0.ConnectorName, 4);
7412 		sas_device->connector_name[4] = '\0';
7413 	} else {
7414 		sas_device->enclosure_level = 0;
7415 		sas_device->connector_name[0] = '\0';
7416 	}
7417 	/* get enclosure_logical_id & chassis_slot*/
7418 	sas_device->is_chassis_slot_valid = 0;
7419 	if (enclosure_dev) {
7420 		sas_device->enclosure_logical_id =
7421 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7422 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7423 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7424 			sas_device->is_chassis_slot_valid = 1;
7425 			sas_device->chassis_slot =
7426 					enclosure_dev->pg0.ChassisSlot;
7427 		}
7428 	}
7429 
7430 	/* get device name */
7431 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7432 	sas_device->port_type = sas_device_pg0.MaxPortConnections;
7433 	ioc_info(ioc,
7434 	    "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
7435 	    handle, sas_device->sas_address, sas_device->port_type);
7436 
7437 	if (ioc->wait_for_discovery_to_complete)
7438 		_scsih_sas_device_init_add(ioc, sas_device);
7439 	else
7440 		_scsih_sas_device_add(ioc, sas_device);
7441 
7442 out:
7443 	sas_device_put(sas_device);
7444 	return 0;
7445 }
7446 
7447 /**
7448  * _scsih_remove_device -  removing sas device object
7449  * @ioc: per adapter object
7450  * @sas_device: the sas_device object
7451  */
7452 static void
_scsih_remove_device(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)7453 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7454 	struct _sas_device *sas_device)
7455 {
7456 	struct MPT3SAS_TARGET *sas_target_priv_data;
7457 
7458 	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7459 	     (sas_device->pfa_led_on)) {
7460 		_scsih_turn_off_pfa_led(ioc, sas_device);
7461 		sas_device->pfa_led_on = 0;
7462 	}
7463 
7464 	dewtprintk(ioc,
7465 		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7466 			    __func__,
7467 			    sas_device->handle, (u64)sas_device->sas_address));
7468 
7469 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7470 	    NULL, NULL));
7471 
7472 	if (sas_device->starget && sas_device->starget->hostdata) {
7473 		sas_target_priv_data = sas_device->starget->hostdata;
7474 		sas_target_priv_data->deleted = 1;
7475 		_scsih_ublock_io_device(ioc, sas_device->sas_address,
7476 		    sas_device->port);
7477 		sas_target_priv_data->handle =
7478 		     MPT3SAS_INVALID_DEVICE_HANDLE;
7479 	}
7480 
7481 	if (!ioc->hide_drives)
7482 		mpt3sas_transport_port_remove(ioc,
7483 		    sas_device->sas_address,
7484 		    sas_device->sas_address_parent,
7485 		    sas_device->port);
7486 
7487 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7488 		 sas_device->handle, (u64)sas_device->sas_address);
7489 
7490 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7491 
7492 	dewtprintk(ioc,
7493 		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7494 			    __func__,
7495 			    sas_device->handle, (u64)sas_device->sas_address));
7496 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7497 	    NULL, NULL));
7498 }
7499 
7500 /**
7501  * _scsih_sas_topology_change_event_debug - debug for topology event
7502  * @ioc: per adapter object
7503  * @event_data: event data payload
7504  * Context: user.
7505  */
7506 static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)7507 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7508 	Mpi2EventDataSasTopologyChangeList_t *event_data)
7509 {
7510 	int i;
7511 	u16 handle;
7512 	u16 reason_code;
7513 	u8 phy_number;
7514 	char *status_str = NULL;
7515 	u8 link_rate, prev_link_rate;
7516 
7517 	switch (event_data->ExpStatus) {
7518 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7519 		status_str = "add";
7520 		break;
7521 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7522 		status_str = "remove";
7523 		break;
7524 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7525 	case 0:
7526 		status_str =  "responding";
7527 		break;
7528 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7529 		status_str = "remove delay";
7530 		break;
7531 	default:
7532 		status_str = "unknown status";
7533 		break;
7534 	}
7535 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7536 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7537 	    "start_phy(%02d), count(%d)\n",
7538 	    le16_to_cpu(event_data->ExpanderDevHandle),
7539 	    le16_to_cpu(event_data->EnclosureHandle),
7540 	    event_data->StartPhyNum, event_data->NumEntries);
7541 	for (i = 0; i < event_data->NumEntries; i++) {
7542 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7543 		if (!handle)
7544 			continue;
7545 		phy_number = event_data->StartPhyNum + i;
7546 		reason_code = event_data->PHY[i].PhyStatus &
7547 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
7548 		switch (reason_code) {
7549 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7550 			status_str = "target add";
7551 			break;
7552 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7553 			status_str = "target remove";
7554 			break;
7555 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7556 			status_str = "delay target remove";
7557 			break;
7558 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7559 			status_str = "link rate change";
7560 			break;
7561 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7562 			status_str = "target responding";
7563 			break;
7564 		default:
7565 			status_str = "unknown";
7566 			break;
7567 		}
7568 		link_rate = event_data->PHY[i].LinkRate >> 4;
7569 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7570 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7571 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7572 		    handle, status_str, link_rate, prev_link_rate);
7573 
7574 	}
7575 }
7576 
7577 /**
7578  * _scsih_sas_topology_change_event - handle topology changes
7579  * @ioc: per adapter object
7580  * @fw_event: The fw_event_work object
7581  * Context: user.
7582  *
7583  */
7584 static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)7585 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7586 	struct fw_event_work *fw_event)
7587 {
7588 	int i;
7589 	u16 parent_handle, handle;
7590 	u16 reason_code;
7591 	u8 phy_number, max_phys;
7592 	struct _sas_node *sas_expander;
7593 	u64 sas_address;
7594 	unsigned long flags;
7595 	u8 link_rate, prev_link_rate;
7596 	struct hba_port *port;
7597 	Mpi2EventDataSasTopologyChangeList_t *event_data =
7598 		(Mpi2EventDataSasTopologyChangeList_t *)
7599 		fw_event->event_data;
7600 
7601 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7602 		_scsih_sas_topology_change_event_debug(ioc, event_data);
7603 
7604 	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
7605 		return 0;
7606 
7607 	if (!ioc->sas_hba.num_phys)
7608 		_scsih_sas_host_add(ioc);
7609 	else
7610 		_scsih_sas_host_refresh(ioc);
7611 
7612 	if (fw_event->ignore) {
7613 		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7614 		return 0;
7615 	}
7616 
7617 	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7618 	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7619 
7620 	/* handle expander add */
7621 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7622 		if (_scsih_expander_add(ioc, parent_handle) != 0)
7623 			return 0;
7624 
7625 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
7626 	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7627 	    parent_handle);
7628 	if (sas_expander) {
7629 		sas_address = sas_expander->sas_address;
7630 		max_phys = sas_expander->num_phys;
7631 		port = sas_expander->port;
7632 	} else if (parent_handle < ioc->sas_hba.num_phys) {
7633 		sas_address = ioc->sas_hba.sas_address;
7634 		max_phys = ioc->sas_hba.num_phys;
7635 	} else {
7636 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7637 		return 0;
7638 	}
7639 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7640 
7641 	/* handle siblings events */
7642 	for (i = 0; i < event_data->NumEntries; i++) {
7643 		if (fw_event->ignore) {
7644 			dewtprintk(ioc,
7645 				   ioc_info(ioc, "ignoring expander event\n"));
7646 			return 0;
7647 		}
7648 		if (ioc->remove_host || ioc->pci_error_recovery)
7649 			return 0;
7650 		phy_number = event_data->StartPhyNum + i;
7651 		if (phy_number >= max_phys)
7652 			continue;
7653 		reason_code = event_data->PHY[i].PhyStatus &
7654 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
7655 		if ((event_data->PHY[i].PhyStatus &
7656 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7657 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7658 				continue;
7659 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7660 		if (!handle)
7661 			continue;
7662 		link_rate = event_data->PHY[i].LinkRate >> 4;
7663 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7664 		switch (reason_code) {
7665 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7666 
7667 			if (ioc->shost_recovery)
7668 				break;
7669 
7670 			if (link_rate == prev_link_rate)
7671 				break;
7672 
7673 			mpt3sas_transport_update_links(ioc, sas_address,
7674 			    handle, phy_number, link_rate, port);
7675 
7676 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7677 				break;
7678 
7679 			_scsih_check_device(ioc, sas_address, handle,
7680 			    phy_number, link_rate);
7681 
7682 			if (!test_bit(handle, ioc->pend_os_device_add))
7683 				break;
7684 
7685 			fallthrough;
7686 
7687 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7688 
7689 			if (ioc->shost_recovery)
7690 				break;
7691 
7692 			mpt3sas_transport_update_links(ioc, sas_address,
7693 			    handle, phy_number, link_rate, port);
7694 
7695 			_scsih_add_device(ioc, handle, phy_number, 0);
7696 
7697 			break;
7698 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7699 
7700 			_scsih_device_remove_by_handle(ioc, handle);
7701 			break;
7702 		}
7703 	}
7704 
7705 	/* handle expander removal */
7706 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7707 	    sas_expander)
7708 		mpt3sas_expander_remove(ioc, sas_address, port);
7709 
7710 	return 0;
7711 }
7712 
7713 /**
7714  * _scsih_sas_device_status_change_event_debug - debug for device event
7715  * @ioc: ?
7716  * @event_data: event data payload
7717  * Context: user.
7718  */
7719 static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasDeviceStatusChange_t * event_data)7720 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7721 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
7722 {
7723 	char *reason_str = NULL;
7724 
7725 	switch (event_data->ReasonCode) {
7726 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7727 		reason_str = "smart data";
7728 		break;
7729 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7730 		reason_str = "unsupported device discovered";
7731 		break;
7732 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7733 		reason_str = "internal device reset";
7734 		break;
7735 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7736 		reason_str = "internal task abort";
7737 		break;
7738 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7739 		reason_str = "internal task abort set";
7740 		break;
7741 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7742 		reason_str = "internal clear task set";
7743 		break;
7744 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7745 		reason_str = "internal query task";
7746 		break;
7747 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7748 		reason_str = "sata init failure";
7749 		break;
7750 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7751 		reason_str = "internal device reset complete";
7752 		break;
7753 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7754 		reason_str = "internal task abort complete";
7755 		break;
7756 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7757 		reason_str = "internal async notification";
7758 		break;
7759 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7760 		reason_str = "expander reduced functionality";
7761 		break;
7762 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7763 		reason_str = "expander reduced functionality complete";
7764 		break;
7765 	default:
7766 		reason_str = "unknown reason";
7767 		break;
7768 	}
7769 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7770 		 reason_str, le16_to_cpu(event_data->DevHandle),
7771 		 (u64)le64_to_cpu(event_data->SASAddress),
7772 		 le16_to_cpu(event_data->TaskTag));
7773 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7774 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7775 			event_data->ASC, event_data->ASCQ);
7776 	pr_cont("\n");
7777 }
7778 
7779 /**
7780  * _scsih_sas_device_status_change_event - handle device status change
7781  * @ioc: per adapter object
7782  * @event_data: The fw event
7783  * Context: user.
7784  */
7785 static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasDeviceStatusChange_t * event_data)7786 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7787 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
7788 {
7789 	struct MPT3SAS_TARGET *target_priv_data;
7790 	struct _sas_device *sas_device;
7791 	u64 sas_address;
7792 	unsigned long flags;
7793 
7794 	/* In MPI Revision K (0xC), the internal device reset complete was
7795 	 * implemented, so avoid setting tm_busy flag for older firmware.
7796 	 */
7797 	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
7798 		return;
7799 
7800 	if (event_data->ReasonCode !=
7801 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7802 	   event_data->ReasonCode !=
7803 	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7804 		return;
7805 
7806 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7807 	sas_address = le64_to_cpu(event_data->SASAddress);
7808 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7809 	    sas_address,
7810 	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7811 
7812 	if (!sas_device || !sas_device->starget)
7813 		goto out;
7814 
7815 	target_priv_data = sas_device->starget->hostdata;
7816 	if (!target_priv_data)
7817 		goto out;
7818 
7819 	if (event_data->ReasonCode ==
7820 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7821 		target_priv_data->tm_busy = 1;
7822 	else
7823 		target_priv_data->tm_busy = 0;
7824 
7825 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7826 		ioc_info(ioc,
7827 		    "%s tm_busy flag for handle(0x%04x)\n",
7828 		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7829 		    target_priv_data->handle);
7830 
7831 out:
7832 	if (sas_device)
7833 		sas_device_put(sas_device);
7834 
7835 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7836 }
7837 
7838 
7839 /**
7840  * _scsih_check_pcie_access_status - check access flags
7841  * @ioc: per adapter object
7842  * @wwid: wwid
7843  * @handle: sas device handle
7844  * @access_status: errors returned during discovery of the device
7845  *
7846  * Return: 0 for success, else failure
7847  */
7848 static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER * ioc,u64 wwid,u16 handle,u8 access_status)7849 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7850 	u16 handle, u8 access_status)
7851 {
7852 	u8 rc = 1;
7853 	char *desc = NULL;
7854 
7855 	switch (access_status) {
7856 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7857 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7858 		rc = 0;
7859 		break;
7860 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7861 		desc = "PCIe device capability failed";
7862 		break;
7863 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7864 		desc = "PCIe device blocked";
7865 		ioc_info(ioc,
7866 		    "Device with Access Status (%s): wwid(0x%016llx), "
7867 		    "handle(0x%04x)\n ll only be added to the internal list",
7868 		    desc, (u64)wwid, handle);
7869 		rc = 0;
7870 		break;
7871 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7872 		desc = "PCIe device mem space access failed";
7873 		break;
7874 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7875 		desc = "PCIe device unsupported";
7876 		break;
7877 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7878 		desc = "PCIe device MSIx Required";
7879 		break;
7880 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7881 		desc = "PCIe device init fail max";
7882 		break;
7883 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7884 		desc = "PCIe device status unknown";
7885 		break;
7886 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7887 		desc = "nvme ready timeout";
7888 		break;
7889 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7890 		desc = "nvme device configuration unsupported";
7891 		break;
7892 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7893 		desc = "nvme identify failed";
7894 		break;
7895 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7896 		desc = "nvme qconfig failed";
7897 		break;
7898 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7899 		desc = "nvme qcreation failed";
7900 		break;
7901 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7902 		desc = "nvme eventcfg failed";
7903 		break;
7904 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7905 		desc = "nvme get feature stat failed";
7906 		break;
7907 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7908 		desc = "nvme idle timeout";
7909 		break;
7910 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7911 		desc = "nvme failure status";
7912 		break;
7913 	default:
7914 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7915 			access_status, (u64)wwid, handle);
7916 		return rc;
7917 	}
7918 
7919 	if (!rc)
7920 		return rc;
7921 
7922 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7923 		 desc, (u64)wwid, handle);
7924 	return rc;
7925 }
7926 
7927 /**
7928  * _scsih_pcie_device_remove_from_sml -  removing pcie device
7929  * from SML and free up associated memory
7930  * @ioc: per adapter object
7931  * @pcie_device: the pcie_device object
7932  */
7933 static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER * ioc,struct _pcie_device * pcie_device)7934 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7935 	struct _pcie_device *pcie_device)
7936 {
7937 	struct MPT3SAS_TARGET *sas_target_priv_data;
7938 
7939 	dewtprintk(ioc,
7940 		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7941 			    __func__,
7942 			    pcie_device->handle, (u64)pcie_device->wwid));
7943 	if (pcie_device->enclosure_handle != 0)
7944 		dewtprintk(ioc,
7945 			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7946 				    __func__,
7947 				    (u64)pcie_device->enclosure_logical_id,
7948 				    pcie_device->slot));
7949 	if (pcie_device->connector_name[0] != '\0')
7950 		dewtprintk(ioc,
7951 			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7952 				    __func__,
7953 				    pcie_device->enclosure_level,
7954 				    pcie_device->connector_name));
7955 
7956 	if (pcie_device->starget && pcie_device->starget->hostdata) {
7957 		sas_target_priv_data = pcie_device->starget->hostdata;
7958 		sas_target_priv_data->deleted = 1;
7959 		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7960 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7961 	}
7962 
7963 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7964 		 pcie_device->handle, (u64)pcie_device->wwid);
7965 	if (pcie_device->enclosure_handle != 0)
7966 		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7967 			 (u64)pcie_device->enclosure_logical_id,
7968 			 pcie_device->slot);
7969 	if (pcie_device->connector_name[0] != '\0')
7970 		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7971 			 pcie_device->enclosure_level,
7972 			 pcie_device->connector_name);
7973 
7974 	if (pcie_device->starget && (pcie_device->access_status !=
7975 				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7976 		scsi_remove_target(&pcie_device->starget->dev);
7977 	dewtprintk(ioc,
7978 		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7979 			    __func__,
7980 			    pcie_device->handle, (u64)pcie_device->wwid));
7981 	if (pcie_device->enclosure_handle != 0)
7982 		dewtprintk(ioc,
7983 			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7984 				    __func__,
7985 				    (u64)pcie_device->enclosure_logical_id,
7986 				    pcie_device->slot));
7987 	if (pcie_device->connector_name[0] != '\0')
7988 		dewtprintk(ioc,
7989 			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7990 				    __func__,
7991 				    pcie_device->enclosure_level,
7992 				    pcie_device->connector_name));
7993 
7994 	kfree(pcie_device->serial_number);
7995 }
7996 
7997 
7998 /**
7999  * _scsih_pcie_check_device - checking device responsiveness
8000  * @ioc: per adapter object
8001  * @handle: attached device handle
8002  */
8003 static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)8004 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8005 {
8006 	Mpi2ConfigReply_t mpi_reply;
8007 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8008 	u32 ioc_status;
8009 	struct _pcie_device *pcie_device;
8010 	u64 wwid;
8011 	unsigned long flags;
8012 	struct scsi_target *starget;
8013 	struct MPT3SAS_TARGET *sas_target_priv_data;
8014 	u32 device_info;
8015 
8016 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8017 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
8018 		return;
8019 
8020 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
8021 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8022 		return;
8023 
8024 	/* check if this is end device */
8025 	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8026 	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8027 		return;
8028 
8029 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
8030 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8031 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8032 
8033 	if (!pcie_device) {
8034 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8035 		return;
8036 	}
8037 
8038 	if (unlikely(pcie_device->handle != handle)) {
8039 		starget = pcie_device->starget;
8040 		sas_target_priv_data = starget->hostdata;
8041 		pcie_device->access_status = pcie_device_pg0.AccessStatus;
8042 		starget_printk(KERN_INFO, starget,
8043 		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
8044 		    pcie_device->handle, handle);
8045 		sas_target_priv_data->handle = handle;
8046 		pcie_device->handle = handle;
8047 
8048 		if (le32_to_cpu(pcie_device_pg0.Flags) &
8049 		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8050 			pcie_device->enclosure_level =
8051 			    pcie_device_pg0.EnclosureLevel;
8052 			memcpy(&pcie_device->connector_name[0],
8053 			    &pcie_device_pg0.ConnectorName[0], 4);
8054 		} else {
8055 			pcie_device->enclosure_level = 0;
8056 			pcie_device->connector_name[0] = '\0';
8057 		}
8058 	}
8059 
8060 	/* check if device is present */
8061 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8062 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8063 		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
8064 			 handle);
8065 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8066 		pcie_device_put(pcie_device);
8067 		return;
8068 	}
8069 
8070 	/* check if there were any issues with discovery */
8071 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8072 	    pcie_device_pg0.AccessStatus)) {
8073 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8074 		pcie_device_put(pcie_device);
8075 		return;
8076 	}
8077 
8078 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8079 	pcie_device_put(pcie_device);
8080 
8081 	_scsih_ublock_io_device(ioc, wwid, NULL);
8082 
8083 	return;
8084 }
8085 
8086 /**
8087  * _scsih_pcie_add_device -  creating pcie device object
8088  * @ioc: per adapter object
8089  * @handle: pcie device handle
8090  *
8091  * Creating end device object, stored in ioc->pcie_device_list.
8092  *
8093  * Return: 1 means queue the event later, 0 means complete the event
8094  */
8095 static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)8096 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8097 {
8098 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8099 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
8100 	Mpi2ConfigReply_t mpi_reply;
8101 	struct _pcie_device *pcie_device;
8102 	struct _enclosure_node *enclosure_dev;
8103 	u32 ioc_status;
8104 	u64 wwid;
8105 
8106 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8107 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8108 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8109 			__FILE__, __LINE__, __func__);
8110 		return 0;
8111 	}
8112 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8113 	    MPI2_IOCSTATUS_MASK;
8114 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8115 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8116 			__FILE__, __LINE__, __func__);
8117 		return 0;
8118 	}
8119 
8120 	set_bit(handle, ioc->pend_os_device_add);
8121 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
8122 
8123 	/* check if device is present */
8124 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8125 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8126 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8127 			handle);
8128 		return 0;
8129 	}
8130 
8131 	/* check if there were any issues with discovery */
8132 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8133 	    pcie_device_pg0.AccessStatus))
8134 		return 0;
8135 
8136 	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8137 	    (pcie_device_pg0.DeviceInfo))))
8138 		return 0;
8139 
8140 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8141 	if (pcie_device) {
8142 		clear_bit(handle, ioc->pend_os_device_add);
8143 		pcie_device_put(pcie_device);
8144 		return 0;
8145 	}
8146 
8147 	/* PCIe Device Page 2 contains read-only information about a
8148 	 * specific NVMe device; therefore, this page is only
8149 	 * valid for NVMe devices and skip for pcie devices of type scsi.
8150 	 */
8151 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
8152 		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8153 		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8154 		    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8155 		    handle)) {
8156 			ioc_err(ioc,
8157 			    "failure at %s:%d/%s()!\n", __FILE__,
8158 			    __LINE__, __func__);
8159 			return 0;
8160 		}
8161 
8162 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8163 					MPI2_IOCSTATUS_MASK;
8164 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8165 			ioc_err(ioc,
8166 			    "failure at %s:%d/%s()!\n", __FILE__,
8167 			    __LINE__, __func__);
8168 			return 0;
8169 		}
8170 	}
8171 
8172 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8173 	if (!pcie_device) {
8174 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8175 			__FILE__, __LINE__, __func__);
8176 		return 0;
8177 	}
8178 
8179 	kref_init(&pcie_device->refcount);
8180 	pcie_device->id = ioc->pcie_target_id++;
8181 	pcie_device->channel = PCIE_CHANNEL;
8182 	pcie_device->handle = handle;
8183 	pcie_device->access_status = pcie_device_pg0.AccessStatus;
8184 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8185 	pcie_device->wwid = wwid;
8186 	pcie_device->port_num = pcie_device_pg0.PortNum;
8187 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8188 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8189 
8190 	pcie_device->enclosure_handle =
8191 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8192 	if (pcie_device->enclosure_handle != 0)
8193 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8194 
8195 	if (le32_to_cpu(pcie_device_pg0.Flags) &
8196 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8197 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8198 		memcpy(&pcie_device->connector_name[0],
8199 		    &pcie_device_pg0.ConnectorName[0], 4);
8200 	} else {
8201 		pcie_device->enclosure_level = 0;
8202 		pcie_device->connector_name[0] = '\0';
8203 	}
8204 
8205 	/* get enclosure_logical_id */
8206 	if (pcie_device->enclosure_handle) {
8207 		enclosure_dev =
8208 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8209 						pcie_device->enclosure_handle);
8210 		if (enclosure_dev)
8211 			pcie_device->enclosure_logical_id =
8212 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8213 	}
8214 	/* TODO -- Add device name once FW supports it */
8215 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
8216 	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8217 		pcie_device->nvme_mdts =
8218 		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8219 		pcie_device->shutdown_latency =
8220 			le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8221 		/*
8222 		 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8223 		 * if drive's RTD3 Entry Latency is greater then IOC's
8224 		 * max_shutdown_latency.
8225 		 */
8226 		if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8227 			ioc->max_shutdown_latency =
8228 				pcie_device->shutdown_latency;
8229 		if (pcie_device_pg2.ControllerResetTO)
8230 			pcie_device->reset_timeout =
8231 			    pcie_device_pg2.ControllerResetTO;
8232 		else
8233 			pcie_device->reset_timeout = 30;
8234 	} else
8235 		pcie_device->reset_timeout = 30;
8236 
8237 	if (ioc->wait_for_discovery_to_complete)
8238 		_scsih_pcie_device_init_add(ioc, pcie_device);
8239 	else
8240 		_scsih_pcie_device_add(ioc, pcie_device);
8241 
8242 	pcie_device_put(pcie_device);
8243 	return 0;
8244 }
8245 
8246 /**
8247  * _scsih_pcie_topology_change_event_debug - debug for topology
8248  * event
8249  * @ioc: per adapter object
8250  * @event_data: event data payload
8251  * Context: user.
8252  */
8253 static void
_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)8254 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8255 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8256 {
8257 	int i;
8258 	u16 handle;
8259 	u16 reason_code;
8260 	u8 port_number;
8261 	char *status_str = NULL;
8262 	u8 link_rate, prev_link_rate;
8263 
8264 	switch (event_data->SwitchStatus) {
8265 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8266 		status_str = "add";
8267 		break;
8268 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8269 		status_str = "remove";
8270 		break;
8271 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8272 	case 0:
8273 		status_str =  "responding";
8274 		break;
8275 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8276 		status_str = "remove delay";
8277 		break;
8278 	default:
8279 		status_str = "unknown status";
8280 		break;
8281 	}
8282 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8283 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8284 		"start_port(%02d), count(%d)\n",
8285 		le16_to_cpu(event_data->SwitchDevHandle),
8286 		le16_to_cpu(event_data->EnclosureHandle),
8287 		event_data->StartPortNum, event_data->NumEntries);
8288 	for (i = 0; i < event_data->NumEntries; i++) {
8289 		handle =
8290 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8291 		if (!handle)
8292 			continue;
8293 		port_number = event_data->StartPortNum + i;
8294 		reason_code = event_data->PortEntry[i].PortStatus;
8295 		switch (reason_code) {
8296 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8297 			status_str = "target add";
8298 			break;
8299 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8300 			status_str = "target remove";
8301 			break;
8302 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8303 			status_str = "delay target remove";
8304 			break;
8305 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8306 			status_str = "link rate change";
8307 			break;
8308 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8309 			status_str = "target responding";
8310 			break;
8311 		default:
8312 			status_str = "unknown";
8313 			break;
8314 		}
8315 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
8316 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8317 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8318 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8319 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8320 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
8321 			handle, status_str, link_rate, prev_link_rate);
8322 	}
8323 }
8324 
8325 /**
8326  * _scsih_pcie_topology_change_event - handle PCIe topology
8327  *  changes
8328  * @ioc: per adapter object
8329  * @fw_event: The fw_event_work object
8330  * Context: user.
8331  *
8332  */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* nothing to do while the host is resetting or being removed */
	if (ioc->shost_recovery || ioc->remove_host ||
		ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* re-check each iteration: the event can be marked ignored
		 * while this loop is still running
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			/* only act on a real rate change to a usable rate */
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime.  Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			/* device object already exists; nothing to convert */
			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			/* rewrite the port status so the DEV_ADDED case
			 * below re-adds the device
			 */
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}
8438 
8439 /**
8440  * _scsih_pcie_device_status_change_event_debug - debug for device event
8441  * @ioc: ?
8442  * @event_data: event data payload
8443  * Context: user.
8444  */
8445 static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeDeviceStatusChange_t * event_data)8446 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8447 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8448 {
8449 	char *reason_str = NULL;
8450 
8451 	switch (event_data->ReasonCode) {
8452 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8453 		reason_str = "smart data";
8454 		break;
8455 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8456 		reason_str = "unsupported device discovered";
8457 		break;
8458 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8459 		reason_str = "internal device reset";
8460 		break;
8461 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8462 		reason_str = "internal task abort";
8463 		break;
8464 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8465 		reason_str = "internal task abort set";
8466 		break;
8467 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8468 		reason_str = "internal clear task set";
8469 		break;
8470 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8471 		reason_str = "internal query task";
8472 		break;
8473 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8474 		reason_str = "device init failure";
8475 		break;
8476 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8477 		reason_str = "internal device reset complete";
8478 		break;
8479 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8480 		reason_str = "internal task abort complete";
8481 		break;
8482 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8483 		reason_str = "internal async notification";
8484 		break;
8485 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8486 		reason_str = "pcie hot reset failed";
8487 		break;
8488 	default:
8489 		reason_str = "unknown reason";
8490 		break;
8491 	}
8492 
8493 	ioc_info(ioc, "PCIE device status change: (%s)\n"
8494 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8495 		 reason_str, le16_to_cpu(event_data->DevHandle),
8496 		 (u64)le64_to_cpu(event_data->WWID),
8497 		 le16_to_cpu(event_data->TaskTag));
8498 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8499 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8500 			event_data->ASC, event_data->ASCQ);
8501 	pr_cont("\n");
8502 }
8503 
8504 /**
8505  * _scsih_pcie_device_status_change_event - handle device status
8506  * change
8507  * @ioc: per adapter object
8508  * @fw_event: The fw_event_work object
8509  * Context: user.
8510  */
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_device_status_change_event_debug(ioc,
			event_data);

	/* only internal-device-reset start/complete need driver action */
	if (event_data->ReasonCode !=
		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
		event_data->ReasonCode !=
		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	/* lock held across lookup and tm_busy update */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	wwid = le64_to_cpu(event_data->WWID);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device || !pcie_device->starget)
		goto out;

	target_priv_data = pcie_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* set tm_busy while the firmware's internal device reset is in
	 * progress; clear it on the reset-complete event
	 */
	if (event_data->ReasonCode ==
		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;
out:
	/* drop the reference taken by the lookup above */
	if (pcie_device)
		pcie_device_put(pcie_device);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
8553 
8554 /**
8555  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8556  * event
8557  * @ioc: per adapter object
8558  * @event_data: event data payload
8559  * Context: user.
8560  */
8561 static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasEnclDevStatusChange_t * event_data)8562 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8563 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8564 {
8565 	char *reason_str = NULL;
8566 
8567 	switch (event_data->ReasonCode) {
8568 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8569 		reason_str = "enclosure add";
8570 		break;
8571 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8572 		reason_str = "enclosure remove";
8573 		break;
8574 	default:
8575 		reason_str = "unknown reason";
8576 		break;
8577 	}
8578 
8579 	ioc_info(ioc, "enclosure status change: (%s)\n"
8580 		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8581 		 reason_str,
8582 		 le16_to_cpu(event_data->EnclosureHandle),
8583 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8584 		 le16_to_cpu(event_data->StartSlot));
8585 }
8586 
8587 /**
8588  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8589  * @ioc: per adapter object
8590  * @fw_event: The fw_event_work object
8591  * Context: user.
8592  */
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2ConfigReply_t mpi_reply;
	struct _enclosure_node *enclosure_dev = NULL;
	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
	int rc;
	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
		     (Mpi2EventDataSasEnclDevStatusChange_t *)
		     fw_event->event_data);
	if (ioc->shost_recovery)
		return;

	/* look up any enclosure node already cached for this handle */
	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
						enclosure_handle);
	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		if (!enclosure_dev) {
			/* new enclosure: allocate a node, read its Enclosure
			 * Page 0 and cache it on ioc->enclosure_list
			 */
			enclosure_dev =
				kzalloc(sizeof(struct _enclosure_node),
					GFP_KERNEL);
			if (!enclosure_dev) {
				ioc_info(ioc, "failure at %s:%d/%s()!\n",
					 __FILE__, __LINE__, __func__);
				return;
			}
			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
				&enclosure_dev->pg0,
				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
				enclosure_handle);

			/* discard the node on any config-read failure */
			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
						MPI2_IOCSTATUS_MASK)) {
				kfree(enclosure_dev);
				return;
			}

			list_add_tail(&enclosure_dev->list,
							&ioc->enclosure_list);
		}
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		/* enclosure gone: drop the cached node, if any */
		if (enclosure_dev) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		}
		break;
	default:
		break;
	}
}
8651 
8652 /**
8653  * _scsih_sas_broadcast_primitive_event - handle broadcast events
8654  * @ioc: per adapter object
8655  * @fw_event: The fw_event_work object
8656  * Context: user.
8657  */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	/* serialize against other task-management users of tm_cmds */
	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	/* quiesce all devices while outstanding IOs are queried/aborted */
	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	/* walk every possible outstanding SCSI IO by smid */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		 /* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		 /* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		 /* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* TM issue may sleep; drop the lookup lock around it */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
				ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		/* IO is orphaned: keep aborting it until it completes */
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		/* st->cb_idx != 0xFF means the IO has not completed yet */
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* another broadcast AEN arrived while processing; run again */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		 ioc->broadcast_aen_pending = 0;
		 goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	/* unblock devices and release the TM path */
	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}
8816 
8817 /**
8818  * _scsih_sas_discovery_event - handle discovery events
8819  * @ioc: per adapter object
8820  * @fw_event: The fw_event_work object
8821  * Context: user.
8822  */
8823 static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8824 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8825 	struct fw_event_work *fw_event)
8826 {
8827 	Mpi2EventDataSasDiscovery_t *event_data =
8828 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8829 
8830 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8831 		ioc_info(ioc, "discovery event: (%s)",
8832 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8833 			 "start" : "stop");
8834 		if (event_data->DiscoveryStatus)
8835 			pr_cont("discovery_status(0x%08x)",
8836 				le32_to_cpu(event_data->DiscoveryStatus));
8837 		pr_cont("\n");
8838 	}
8839 
8840 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8841 	    !ioc->sas_hba.num_phys) {
8842 		if (disable_discovery > 0 && ioc->shost_recovery) {
8843 			/* Wait for the reset to complete */
8844 			while (ioc->shost_recovery)
8845 				ssleep(1);
8846 		}
8847 		_scsih_sas_host_add(ioc);
8848 	}
8849 }
8850 
8851 /**
8852  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8853  *						events
8854  * @ioc: per adapter object
8855  * @fw_event: The fw_event_work object
8856  * Context: user.
8857  */
8858 static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8859 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8860 	struct fw_event_work *fw_event)
8861 {
8862 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8863 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8864 
8865 	switch (event_data->ReasonCode) {
8866 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8867 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8868 			 le16_to_cpu(event_data->DevHandle),
8869 			 (u64)le64_to_cpu(event_data->SASAddress),
8870 			 event_data->PhysicalPort);
8871 		break;
8872 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8873 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8874 			 le16_to_cpu(event_data->DevHandle),
8875 			 (u64)le64_to_cpu(event_data->SASAddress),
8876 			 event_data->PhysicalPort);
8877 		break;
8878 	default:
8879 		break;
8880 	}
8881 }
8882 
8883 /**
8884  * _scsih_pcie_enumeration_event - handle enumeration events
8885  * @ioc: per adapter object
8886  * @fw_event: The fw_event_work object
8887  * Context: user.
8888  */
8889 static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8890 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8891 	struct fw_event_work *fw_event)
8892 {
8893 	Mpi26EventDataPCIeEnumeration_t *event_data =
8894 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8895 
8896 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8897 		return;
8898 
8899 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8900 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8901 		 "started" : "completed",
8902 		 event_data->Flags);
8903 	if (event_data->EnumerationStatus)
8904 		pr_cont("enumeration_status(0x%08x)",
8905 			le32_to_cpu(event_data->EnumerationStatus));
8906 	pr_cont("\n");
8907 }
8908 
8909 /**
8910  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8911  * @ioc: per adapter object
8912  * @handle: device handle for physical disk
8913  * @phys_disk_num: physical disk number
8914  *
8915  * Return: 0 for success, else failure.
8916  */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* not applicable on MPI2 (SAS2) generation controllers */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	/* build the RAID_ACTION request in the message frame */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	/* post the request and wait up to 10 seconds for completion */
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/* timed out; presumably this may set issue_reset, which
		 * triggers the hard reset below after the mutex is dropped
		 * — confirm against mpt3sas_check_cmd_timeout definition
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		/* decode the firmware's RAID_ACTION reply */
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	/* issue the hard reset outside the scsih_cmds mutex */
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}
8999 
9000 /**
9001  * _scsih_reprobe_lun - reprobing lun
9002  * @sdev: scsi device struct
9003  * @no_uld_attach: sdev->no_uld_attach flag setting
9004  *
9005  **/
9006 static void
_scsih_reprobe_lun(struct scsi_device * sdev,void * no_uld_attach)9007 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
9008 {
9009 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
9010 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
9011 	    sdev->no_uld_attach ? "hiding" : "exposing");
9012 	WARN_ON(scsi_device_reprobe(sdev));
9013 }
9014 
/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Allocates and registers a new raid_device for the volume named by
 * @element->VolDevHandle, then either scans it into the SCSI midlayer
 * or, during initial discovery, only records boot-device information.
 */
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	u64 wwid;
	u16 handle = le16_to_cpu(element->VolDevHandle);
	int rc;

	/* The WWID is the key used to track the volume; without one the
	 * volume cannot be added.
	 */
	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
	if (!wwid) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* Nothing to do if this volume is already being tracked. */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	if (raid_device)
		return;

	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
	if (!raid_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	raid_device->id = ioc->sas_id++;
	raid_device->channel = RAID_CHANNEL;
	raid_device->handle = handle;
	raid_device->wwid = wwid;
	_scsih_raid_device_add(ioc, raid_device);
	if (!ioc->wait_for_discovery_to_complete) {
		/* If the midlayer scan fails, unwind the tracking entry. */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else {
		/* Initial discovery still in progress: defer the scsi
		 * scan and only evaluate this volume as a boot device.
		 */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		_scsih_determine_boot_device(ioc, raid_device, 1);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}
9068 
/**
 * _scsih_sas_volume_delete - delete volume
 * @ioc: per adapter object
 * @handle: volume device handle
 * Context: user.
 *
 * Removes the raid_device tracking entry for @handle and, if a SCSI
 * target was attached, removes the target from the midlayer.
 */
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget = NULL;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device) {
		if (raid_device->starget) {
			/* Flag the target deleted so no new I/O is sent
			 * while the removal is in flight.
			 */
			starget = raid_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->deleted = 1;
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		list_del(&raid_device->list);
		kfree(raid_device);
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	/* scsi_remove_target() may sleep, so it must be called after
	 * the spin lock has been dropped.
	 */
	if (starget)
		scsi_remove_target(&starget->dev);
}
9100 
/**
 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * The physical disk has left a volume: clear its RAID-component state
 * and reprobe its LUNs so the upper-level drivers attach to it again.
 */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* Disassociate the disk from its former volume and drop
		 * it from the physical-disk handle bitmap.
		 */
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);

	/* Drop the reference taken by __mpt3sas_get_sdev_by_handle(). */
	sas_device_put(sas_device);
}
9140 
/**
 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * The physical disk has joined a volume: record its volume association,
 * mark it as a RAID component, enable the IR fast path for it and
 * reprobe its LUNs so the upper-level drivers detach.
 */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	/* Look up the owning volume so it can be recorded on the disk. */
	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
		    &volume_wwid);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	/* (void *)1 => set no_uld_attach in _scsih_reprobe_lun(). */
	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);

	/* Drop the reference taken by __mpt3sas_get_sdev_by_handle(). */
	sas_device_put(sas_device);
}
9189 
9190 /**
9191  * _scsih_sas_pd_delete - delete pd component
9192  * @ioc: per adapter object
9193  * @element: IR config element data
9194  * Context: user.
9195  */
9196 static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9197 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9198 	Mpi2EventIrConfigElement_t *element)
9199 {
9200 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9201 
9202 	_scsih_device_remove_by_handle(ioc, handle);
9203 }
9204 
/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Marks the handle as a RAID physical disk and enables the IR fast
 * path for it; if the device is not yet known, reads its SAS device
 * page 0, refreshes the parent phy links and adds the device.
 */
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u64 sas_address;
	u16 parent_handle;

	/* Remember that this handle is a RAID physical disk. */
	set_bit(handle, ioc->pd_handles);

	/* Device already known: just turn on the fast path and drop
	 * the reference taken by mpt3sas_get_sdev_by_handle().
	 */
	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
		sas_device_put(sas_device);
		return;
	}

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* Refresh the parent's phy link state before adding the disk. */
	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
		mpt3sas_transport_update_links(ioc, sas_address, handle,
		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
		    mpt3sas_get_port_by_id(ioc,
		    sas_device_pg0.PhysicalPort, 0));

	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
	_scsih_add_device(ioc, handle, 0, 1);
}
9257 
9258 /**
9259  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9260  * @ioc: per adapter object
9261  * @event_data: event data payload
9262  * Context: user.
9263  */
9264 static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrConfigChangeList_t * event_data)9265 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9266 	Mpi2EventDataIrConfigChangeList_t *event_data)
9267 {
9268 	Mpi2EventIrConfigElement_t *element;
9269 	u8 element_type;
9270 	int i;
9271 	char *reason_str = NULL, *element_str = NULL;
9272 
9273 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9274 
9275 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9276 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9277 		 "foreign" : "native",
9278 		 event_data->NumElements);
9279 	for (i = 0; i < event_data->NumElements; i++, element++) {
9280 		switch (element->ReasonCode) {
9281 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9282 			reason_str = "add";
9283 			break;
9284 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9285 			reason_str = "remove";
9286 			break;
9287 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9288 			reason_str = "no change";
9289 			break;
9290 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9291 			reason_str = "hide";
9292 			break;
9293 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9294 			reason_str = "unhide";
9295 			break;
9296 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9297 			reason_str = "volume_created";
9298 			break;
9299 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9300 			reason_str = "volume_deleted";
9301 			break;
9302 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9303 			reason_str = "pd_created";
9304 			break;
9305 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9306 			reason_str = "pd_deleted";
9307 			break;
9308 		default:
9309 			reason_str = "unknown reason";
9310 			break;
9311 		}
9312 		element_type = le16_to_cpu(element->ElementFlags) &
9313 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9314 		switch (element_type) {
9315 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9316 			element_str = "volume";
9317 			break;
9318 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9319 			element_str = "phys disk";
9320 			break;
9321 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9322 			element_str = "hot spare";
9323 			break;
9324 		default:
9325 			element_str = "unknown element";
9326 			break;
9327 		}
9328 		pr_info("\t(%s:%s), vol handle(0x%04x), " \
9329 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9330 		    reason_str, le16_to_cpu(element->VolDevHandle),
9331 		    le16_to_cpu(element->PhysDiskDevHandle),
9332 		    element->PhysDiskNum);
9333 	}
9334 }
9335 
/**
 * _scsih_sas_ir_config_change_event - handle ir configuration change events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Dispatches each element of the IR config change list to the matching
 * volume/physical-disk add, delete, hide or expose handler.
 */
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u8 foreign_config;
	Mpi2EventDataIrConfigChangeList_t *event_data =
		(Mpi2EventDataIrConfigChangeList_t *)
		fw_event->event_data;

	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
	     (!ioc->hide_ir_msg))
		_scsih_sas_ir_config_change_event_debug(ioc, event_data);

	/* Volumes belonging to a foreign config are neither added nor
	 * deleted below; only physical-disk elements are processed.
	 */
	foreign_config = (le32_to_cpu(event_data->Flags) &
	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	/* During host recovery on non-MPI2.0 HBAs, limit processing to
	 * enabling the fast path on disks being hidden.
	 */
	if (ioc->shost_recovery &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		for (i = 0; i < event_data->NumElements; i++, element++) {
			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
				_scsih_ir_fastpath(ioc,
					le16_to_cpu(element->PhysDiskDevHandle),
					element->PhysDiskNum);
		}
		return;
	}

	for (i = 0; i < event_data->NumElements; i++, element++) {

		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			if (!foreign_config)
				_scsih_sas_volume_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			if (!foreign_config)
				_scsih_sas_volume_delete(ioc,
				    le16_to_cpu(element->VolDevHandle));
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			/* WarpDrive does not hide/expose raid components. */
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_hide(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_expose(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_delete(ioc, element);
			break;
		}
	}
}
9405 
/**
 * _scsih_sas_ir_volume_event - IR volume event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Handles IR volume state changes: deletes the volume when it goes
 * missing/failed, and adds it (if not already tracked) when it becomes
 * online, degraded or optimal.
 */
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u64 wwid;
	unsigned long flags;
	struct _raid_device *raid_device;
	u16 handle;
	u32 state;
	int rc;
	Mpi2EventDataIrVolume_t *event_data =
		(Mpi2EventDataIrVolume_t *) fw_event->event_data;

	/* Device state is re-evaluated after a reset; skip the event. */
	if (ioc->shost_recovery)
		return;

	/* Only state-change notifications are of interest here. */
	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->VolDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));
	switch (state) {
	case MPI2_RAID_VOL_STATE_MISSING:
	case MPI2_RAID_VOL_STATE_FAILED:
		_scsih_sas_volume_delete(ioc, handle);
		break;

	case MPI2_RAID_VOL_STATE_ONLINE:
	case MPI2_RAID_VOL_STATE_DEGRADED:
	case MPI2_RAID_VOL_STATE_OPTIMAL:

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

		/* Already tracked: nothing more to do. */
		if (raid_device)
			break;

		/* The WWID is the key used to track the volume. */
		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
		if (!wwid) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
		if (!raid_device) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device->id = ioc->sas_id++;
		raid_device->channel = RAID_CHANNEL;
		raid_device->handle = handle;
		raid_device->wwid = wwid;
		_scsih_raid_device_add(ioc, raid_device);
		/* If the midlayer scan fails, unwind the tracking entry. */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
		break;

	case MPI2_RAID_VOL_STATE_INITIALIZING:
	default:
		break;
	}
}
9486 
/**
 * _scsih_sas_ir_physical_disk_event - PD event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Handles IR physical disk state changes: when a disk becomes usable
 * (online/degraded/rebuilding/optimal/hot spare) it is recorded in the
 * pd bitmap and, if not yet known, added as a device.
 */
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct _sas_device *sas_device;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	Mpi2EventDataIrPhysicalDisk_t *event_data =
		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
	u64 sas_address;

	/* Device state is re-evaluated after a reset; skip the event. */
	if (ioc->shost_recovery)
		return;

	/* Only state-change notifications are of interest here. */
	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);

	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));

	switch (state) {
	case MPI2_RAID_PD_STATE_ONLINE:
	case MPI2_RAID_PD_STATE_DEGRADED:
	case MPI2_RAID_PD_STATE_REBUILDING:
	case MPI2_RAID_PD_STATE_OPTIMAL:
	case MPI2_RAID_PD_STATE_HOT_SPARE:

		if (!ioc->is_warpdrive)
			set_bit(handle, ioc->pd_handles);

		/* Already known: drop the acquired reference and exit. */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			return;
		}

		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		/* Refresh the parent's phy links before adding the disk. */
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc,
			    sas_device_pg0.PhysicalPort, 0));

		_scsih_add_device(ioc, handle, 0, 1);

		break;

	case MPI2_RAID_PD_STATE_OFFLINE:
	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}
9573 
9574 /**
9575  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9576  * @ioc: per adapter object
9577  * @event_data: event data payload
9578  * Context: user.
9579  */
9580 static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrOperationStatus_t * event_data)9581 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9582 	Mpi2EventDataIrOperationStatus_t *event_data)
9583 {
9584 	char *reason_str = NULL;
9585 
9586 	switch (event_data->RAIDOperation) {
9587 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
9588 		reason_str = "resync";
9589 		break;
9590 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9591 		reason_str = "online capacity expansion";
9592 		break;
9593 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9594 		reason_str = "consistency check";
9595 		break;
9596 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9597 		reason_str = "background init";
9598 		break;
9599 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9600 		reason_str = "make data consistent";
9601 		break;
9602 	}
9603 
9604 	if (!reason_str)
9605 		return;
9606 
9607 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9608 		 reason_str,
9609 		 le16_to_cpu(event_data->VolDevHandle),
9610 		 event_data->PercentComplete);
9611 }
9612 
9613 /**
9614  * _scsih_sas_ir_operation_status_event - handle RAID operation events
9615  * @ioc: per adapter object
9616  * @fw_event: The fw_event_work object
9617  * Context: user.
9618  */
9619 static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9620 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9621 	struct fw_event_work *fw_event)
9622 {
9623 	Mpi2EventDataIrOperationStatus_t *event_data =
9624 		(Mpi2EventDataIrOperationStatus_t *)
9625 		fw_event->event_data;
9626 	static struct _raid_device *raid_device;
9627 	unsigned long flags;
9628 	u16 handle;
9629 
9630 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9631 	    (!ioc->hide_ir_msg))
9632 		_scsih_sas_ir_operation_status_event_debug(ioc,
9633 		     event_data);
9634 
9635 	/* code added for raid transport support */
9636 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9637 
9638 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9639 		handle = le16_to_cpu(event_data->VolDevHandle);
9640 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9641 		if (raid_device)
9642 			raid_device->percent_complete =
9643 			    event_data->PercentComplete;
9644 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9645 	}
9646 }
9647 
9648 /**
9649  * _scsih_prep_device_scan - initialize parameters prior to device scan
9650  * @ioc: per adapter object
9651  *
9652  * Set the deleted flag prior to device scan.  If the device is found during
9653  * the scan, then we clear the deleted flag.
9654  */
9655 static void
_scsih_prep_device_scan(struct MPT3SAS_ADAPTER * ioc)9656 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9657 {
9658 	struct MPT3SAS_DEVICE *sas_device_priv_data;
9659 	struct scsi_device *sdev;
9660 
9661 	shost_for_each_device(sdev, ioc->shost) {
9662 		sas_device_priv_data = sdev->hostdata;
9663 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
9664 			sas_device_priv_data->sas_target->deleted = 1;
9665 	}
9666 }
9667 
/**
 * _scsih_update_device_qdepth - Update QD during Reset.
 * @ioc: per adapter object
 *
 * Re-applies the per-transport queue depth limits (NVMe, SSP wide or
 * narrow port, SATA) to every attached scsi device after a reset.
 */
static void
_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct scsi_device *sdev;
	u16 qdepth;

	ioc_info(ioc, "Update devices with firmware reported queue depth\n");
	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (sas_device_priv_data && sas_device_priv_data->sas_target) {
			sas_target_priv_data = sas_device_priv_data->sas_target;
			sas_device = sas_device_priv_data->sas_target->sas_dev;
			/* Pick the limit matching the device's transport;
			 * devices matching none keep their current depth.
			 */
			if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
				qdepth = ioc->max_nvme_qd;
			else if (sas_device &&
			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
				qdepth = (sas_device->port_type > 1) ?
				    ioc->max_wideport_qd : ioc->max_narrowport_qd;
			else if (sas_device &&
			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				qdepth = ioc->max_sata_qd;
			else
				continue;
			mpt3sas_scsih_change_queue_depth(sdev, qdepth);
		}
	}
}
9703 
/**
 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
 * @ioc: per adapter object
 * @sas_device_pg0: SAS Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_sas_devices.
 *
 * Matches the page-0 data against the tracked sas_device list by SAS
 * address, slot and port; on a match, marks the device responding,
 * refreshes its enclosure information and updates its handle if the
 * firmware assigned a new one.
 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, sas_device_pg0->PhysicalPort, 0);

	/* Look up cached enclosure data for chassis-slot information. */
	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
				le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* Match by SAS address, slot and port. */
		if (sas_device->sas_address != le64_to_cpu(
		    sas_device_pg0->SASAddress))
			continue;
		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
			continue;
		if (sas_device->port != port)
			continue;
		sas_device->responding = 1;
		starget = sas_device->starget;
		if (starget && starget->hostdata) {
			/* Device survived the reset: clear stale flags. */
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->tm_busy = 0;
			sas_target_priv_data->deleted = 0;
		} else
			sas_target_priv_data = NULL;
		if (starget) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_addr(0x%016llx)\n",
			    le16_to_cpu(sas_device_pg0->DevHandle),
			    (unsigned long long)
			    sas_device->sas_address);

			if (sas_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				 "enclosure logical id(0x%016llx), slot(%d)\n",
				 (unsigned long long)
				 sas_device->enclosure_logical_id,
				 sas_device->slot);
		}
		/* Refresh enclosure level / connector name from page 0. */
		if (le16_to_cpu(sas_device_pg0->Flags) &
		      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			   sas_device_pg0->EnclosureLevel;
			memcpy(&sas_device->connector_name[0],
				&sas_device_pg0->ConnectorName[0], 4);
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
			le16_to_cpu(sas_device_pg0->EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		if (enclosure_dev) {
			sas_device->enclosure_logical_id = le64_to_cpu(
				enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}

		/* Firmware may have assigned a new handle after reset. */
		if (sas_device->handle == le16_to_cpu(
		    sas_device_pg0->DevHandle))
			goto out;
		pr_info("\thandle changed from(0x%04x)!!!\n",
		    sas_device->handle);
		sas_device->handle = le16_to_cpu(
		    sas_device_pg0->DevHandle);
		if (sas_target_priv_data)
			sas_target_priv_data->handle =
			    le16_to_cpu(sas_device_pg0->DevHandle);
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
9803 
9804 /**
9805  * _scsih_create_enclosure_list_after_reset - Free Existing list,
9806  *	And create enclosure list by scanning all Enclosure Page(0)s
9807  * @ioc: per adapter object
9808  */
9809 static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER * ioc)9810 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9811 {
9812 	struct _enclosure_node *enclosure_dev;
9813 	Mpi2ConfigReply_t mpi_reply;
9814 	u16 enclosure_handle;
9815 	int rc;
9816 
9817 	/* Free existing enclosure list */
9818 	mpt3sas_free_enclosure_list(ioc);
9819 
9820 	/* Re constructing enclosure list after reset*/
9821 	enclosure_handle = 0xFFFF;
9822 	do {
9823 		enclosure_dev =
9824 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9825 		if (!enclosure_dev) {
9826 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
9827 				__FILE__, __LINE__, __func__);
9828 			return;
9829 		}
9830 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9831 				&enclosure_dev->pg0,
9832 				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9833 				enclosure_handle);
9834 
9835 		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9836 						MPI2_IOCSTATUS_MASK)) {
9837 			kfree(enclosure_dev);
9838 			return;
9839 		}
9840 		list_add_tail(&enclosure_dev->list,
9841 						&ioc->enclosure_list);
9842 		enclosure_handle =
9843 			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9844 	} while (1);
9845 }
9846 
9847 /**
9848  * _scsih_search_responding_sas_devices -
9849  * @ioc: per adapter object
9850  *
9851  * After host reset, find out whether devices are still responding.
9852  * If not remove.
9853  */
9854 static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER * ioc)9855 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9856 {
9857 	Mpi2SasDevicePage0_t sas_device_pg0;
9858 	Mpi2ConfigReply_t mpi_reply;
9859 	u16 ioc_status;
9860 	u16 handle;
9861 	u32 device_info;
9862 
9863 	ioc_info(ioc, "search for end-devices: start\n");
9864 
9865 	if (list_empty(&ioc->sas_device_list))
9866 		goto out;
9867 
9868 	handle = 0xFFFF;
9869 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9870 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9871 	    handle))) {
9872 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9873 		    MPI2_IOCSTATUS_MASK;
9874 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9875 			break;
9876 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
9877 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9878 		if (!(_scsih_is_end_device(device_info)))
9879 			continue;
9880 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9881 	}
9882 
9883  out:
9884 	ioc_info(ioc, "search for end-devices: complete\n");
9885 }
9886 
/**
 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 * @ioc: per adapter object
 * @pcie_device_pg0: PCIe Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
 *
 * Matches the page-0 data against the tracked pcie_device list by WWID
 * and slot; on a match, marks the device responding, refreshes its
 * enclosure information and updates its handle if the firmware
 * assigned a new one.
 */
static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		/* Match by WWID and slot. */
		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
		    && (pcie_device->slot == le16_to_cpu(
		    pcie_device_pg0->Slot))) {
			pcie_device->access_status =
					pcie_device_pg0->AccessStatus;
			pcie_device->responding = 1;
			starget = pcie_device->starget;
			if (starget && starget->hostdata) {
				/* Device survived the reset: clear flags. */
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->tm_busy = 0;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			if (starget) {
				starget_printk(KERN_INFO, starget,
				    "handle(0x%04x), wwid(0x%016llx) ",
				    pcie_device->handle,
				    (unsigned long long)pcie_device->wwid);
				if (pcie_device->enclosure_handle != 0)
					starget_printk(KERN_INFO, starget,
					    "enclosure logical id(0x%016llx), "
					    "slot(%d)\n",
					    (unsigned long long)
					    pcie_device->enclosure_logical_id,
					    pcie_device->slot);
			}

			/* Refresh enclosure level / connector name. */
			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
				pcie_device->enclosure_level =
				    pcie_device_pg0->EnclosureLevel;
				memcpy(&pcie_device->connector_name[0],
				    &pcie_device_pg0->ConnectorName[0], 4);
			} else {
				pcie_device->enclosure_level = 0;
				pcie_device->connector_name[0] = '\0';
			}

			/* Firmware may have assigned a new handle. */
			if (pcie_device->handle == le16_to_cpu(
			    pcie_device_pg0->DevHandle))
				goto out;
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    pcie_device->handle);
			pcie_device->handle = le16_to_cpu(
			    pcie_device_pg0->DevHandle);
			if (sas_target_priv_data)
				sas_target_priv_data->handle =
				    le16_to_cpu(pcie_device_pg0->DevHandle);
			goto out;
		}
	}

 out:
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
9962 
9963 /**
9964  * _scsih_search_responding_pcie_devices -
9965  * @ioc: per adapter object
9966  *
9967  * After host reset, find out whether devices are still responding.
9968  * If not remove.
9969  */
9970 static void
_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER * ioc)9971 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9972 {
9973 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
9974 	Mpi2ConfigReply_t mpi_reply;
9975 	u16 ioc_status;
9976 	u16 handle;
9977 	u32 device_info;
9978 
9979 	ioc_info(ioc, "search for end-devices: start\n");
9980 
9981 	if (list_empty(&ioc->pcie_device_list))
9982 		goto out;
9983 
9984 	handle = 0xFFFF;
9985 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9986 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9987 		handle))) {
9988 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9989 		    MPI2_IOCSTATUS_MASK;
9990 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9991 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9992 				 __func__, ioc_status,
9993 				 le32_to_cpu(mpi_reply.IOCLogInfo));
9994 			break;
9995 		}
9996 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9997 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9998 		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9999 			continue;
10000 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
10001 	}
10002 out:
10003 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
10004 }
10005 
/**
 * _scsih_mark_responding_raid_device - mark a raid_device as responding
 * @ioc: per adapter object
 * @wwid: world wide identifier for raid volume
 * @handle: device handle
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_raid_devices.
 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		/* Match on WWID; the handle may have changed across reset. */
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/* Drop the lock around the printk and the warpdrive
			 * re-init below (presumably they may block — note:
			 * raid_device is used unlocked in that window).
			 */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			/* Re-acquire before updating the cached handle. */
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
10062 
/**
 * _scsih_search_responding_raid_devices -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u8 phys_disk_num;

	/* Nothing to do unless the controller runs IR (RAID) firmware. */
	if (!ioc->ir_firmware)
		return;

	ioc_info(ioc, "search for raid volumes: start\n");

	if (list_empty(&ioc->raid_device_list))
		goto out;

	/* Walk all volumes by iterating GET_NEXT_HANDLE from 0xFFFF. */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(volume_pg1.DevHandle);

		/* Need page 0 for the volume state; skip on failure. */
		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		     sizeof(Mpi2RaidVolPage0_t)))
			continue;

		/* Only usable volume states count as responding. */
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
			_scsih_mark_responding_raid_device(ioc,
			    le64_to_cpu(volume_pg1.WWID), handle);
	}

	/* refresh the pd_handles */
	if (!ioc->is_warpdrive) {
		/* Rebuild the phys-disk handle bitmap from scratch by
		 * iterating GET_NEXT_PHYSDISKNUM from 0xFF.
		 */
		phys_disk_num = 0xFF;
		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
		    phys_disk_num))) {
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
				break;
			phys_disk_num = pd_pg0.PhysDiskNum;
			handle = le16_to_cpu(pd_pg0.DevHandle);
			set_bit(handle, ioc->pd_handles);
		}
	}
 out:
	ioc_info(ioc, "search for responding raid volumes: complete\n");
}
10129 
/**
 * _scsih_mark_responding_expander - mark a expander as responding
 * @ioc: per adapter object
 * @expander_pg0:SAS Expander Config Page0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_expanders.
 */
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
	Mpi2ExpanderPage0_t *expander_pg0)
{
	struct _sas_node *sas_expander = NULL;
	unsigned long flags;
	int i;
	struct _enclosure_node *enclosure_dev = NULL;
	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, expander_pg0->PhysicalPort, 0);

	/* Look up the enclosure (if any) before taking the node lock. */
	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
							enclosure_handle);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		/* Match on SAS address + port; the firmware handle may
		 * have changed across the reset.
		 */
		if (sas_expander->sas_address != sas_address)
			continue;
		if (sas_expander->port != port)
			continue;
		sas_expander->responding = 1;

		if (enclosure_dev) {
			sas_expander->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			sas_expander->enclosure_handle =
			    le16_to_cpu(expander_pg0->EnclosureHandle);
		}

		if (sas_expander->handle == handle)
			goto out;
		/* Handle changed: propagate the new one to every phy. */
		pr_info("\texpander(0x%016llx): handle changed" \
		    " from(0x%04x) to (0x%04x)!!!\n",
		    (unsigned long long)sas_expander->sas_address,
		    sas_expander->handle, handle);
		sas_expander->handle = handle;
		for (i = 0 ; i < sas_expander->num_phys ; i++)
			sas_expander->phy[i].handle = handle;
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}
10186 
10187 /**
10188  * _scsih_search_responding_expanders -
10189  * @ioc: per adapter object
10190  *
10191  * After host reset, find out whether devices are still responding.
10192  * If not remove.
10193  */
10194 static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER * ioc)10195 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10196 {
10197 	Mpi2ExpanderPage0_t expander_pg0;
10198 	Mpi2ConfigReply_t mpi_reply;
10199 	u16 ioc_status;
10200 	u64 sas_address;
10201 	u16 handle;
10202 	u8 port;
10203 
10204 	ioc_info(ioc, "search for expanders: start\n");
10205 
10206 	if (list_empty(&ioc->sas_expander_list))
10207 		goto out;
10208 
10209 	handle = 0xFFFF;
10210 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10211 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10212 
10213 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10214 		    MPI2_IOCSTATUS_MASK;
10215 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10216 			break;
10217 
10218 		handle = le16_to_cpu(expander_pg0.DevHandle);
10219 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
10220 		port = expander_pg0.PhysicalPort;
10221 		pr_info(
10222 		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10223 		    handle, (unsigned long long)sas_address,
10224 		    (ioc->multipath_on_hba ?
10225 		    port : MULTIPATH_DISABLED_PORT_ID));
10226 		_scsih_mark_responding_expander(ioc, &expander_pg0);
10227 	}
10228 
10229  out:
10230 	ioc_info(ioc, "search for expanders: complete\n");
10231 }
10232 
/**
 * _scsih_remove_unresponding_devices - removing unresponding devices
 * @ioc: per adapter object
 */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Clean up the sas_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_init_list, list) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* Move non-responding devices onto a private list under the lock;
	 * clear the responding flag on survivors for the next reset cycle.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* Same two-phase prune for PCIe (NVMe) end devices. */
	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/*
	 * Clean up the pcie_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_init_list, list) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	/* Tear the pruned expanders down outside the spinlock. */
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}
10349 
10350 static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander,u16 handle)10351 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10352 	struct _sas_node *sas_expander, u16 handle)
10353 {
10354 	Mpi2ExpanderPage1_t expander_pg1;
10355 	Mpi2ConfigReply_t mpi_reply;
10356 	int i;
10357 
10358 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
10359 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10360 		    &expander_pg1, i, handle))) {
10361 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
10362 				__FILE__, __LINE__, __func__);
10363 			return;
10364 		}
10365 
10366 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10367 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10368 		    expander_pg1.NegotiatedLinkRate >> 4,
10369 		    sas_expander->port);
10370 	}
10371 }
10372 
10373 /**
10374  * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10375  * @ioc: per adapter object
10376  */
10377 static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER * ioc)10378 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10379 {
10380 	Mpi2ExpanderPage0_t expander_pg0;
10381 	Mpi2SasDevicePage0_t sas_device_pg0;
10382 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
10383 	Mpi2RaidVolPage1_t volume_pg1;
10384 	Mpi2RaidVolPage0_t volume_pg0;
10385 	Mpi2RaidPhysDiskPage0_t pd_pg0;
10386 	Mpi2EventIrConfigElement_t element;
10387 	Mpi2ConfigReply_t mpi_reply;
10388 	u8 phys_disk_num, port_id;
10389 	u16 ioc_status;
10390 	u16 handle, parent_handle;
10391 	u64 sas_address;
10392 	struct _sas_device *sas_device;
10393 	struct _pcie_device *pcie_device;
10394 	struct _sas_node *expander_device;
10395 	static struct _raid_device *raid_device;
10396 	u8 retry_count;
10397 	unsigned long flags;
10398 
10399 	ioc_info(ioc, "scan devices: start\n");
10400 
10401 	_scsih_sas_host_refresh(ioc);
10402 
10403 	ioc_info(ioc, "\tscan devices: expanders start\n");
10404 
10405 	/* expanders */
10406 	handle = 0xFFFF;
10407 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10408 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10409 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10410 		    MPI2_IOCSTATUS_MASK;
10411 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10412 			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10413 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10414 			break;
10415 		}
10416 		handle = le16_to_cpu(expander_pg0.DevHandle);
10417 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
10418 		port_id = expander_pg0.PhysicalPort;
10419 		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10420 		    ioc, le64_to_cpu(expander_pg0.SASAddress),
10421 		    mpt3sas_get_port_by_id(ioc, port_id, 0));
10422 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10423 		if (expander_device)
10424 			_scsih_refresh_expander_links(ioc, expander_device,
10425 			    handle);
10426 		else {
10427 			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10428 				 handle,
10429 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
10430 			_scsih_expander_add(ioc, handle);
10431 			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10432 				 handle,
10433 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
10434 		}
10435 	}
10436 
10437 	ioc_info(ioc, "\tscan devices: expanders complete\n");
10438 
10439 	if (!ioc->ir_firmware)
10440 		goto skip_to_sas;
10441 
10442 	ioc_info(ioc, "\tscan devices: phys disk start\n");
10443 
10444 	/* phys disk */
10445 	phys_disk_num = 0xFF;
10446 	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10447 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10448 	    phys_disk_num))) {
10449 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10450 		    MPI2_IOCSTATUS_MASK;
10451 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10452 			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10453 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10454 			break;
10455 		}
10456 		phys_disk_num = pd_pg0.PhysDiskNum;
10457 		handle = le16_to_cpu(pd_pg0.DevHandle);
10458 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10459 		if (sas_device) {
10460 			sas_device_put(sas_device);
10461 			continue;
10462 		}
10463 		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10464 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10465 		    handle) != 0)
10466 			continue;
10467 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10468 		    MPI2_IOCSTATUS_MASK;
10469 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10470 			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10471 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10472 			break;
10473 		}
10474 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10475 		if (!_scsih_get_sas_address(ioc, parent_handle,
10476 		    &sas_address)) {
10477 			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10478 				 handle,
10479 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10480 			port_id = sas_device_pg0.PhysicalPort;
10481 			mpt3sas_transport_update_links(ioc, sas_address,
10482 			    handle, sas_device_pg0.PhyNum,
10483 			    MPI2_SAS_NEG_LINK_RATE_1_5,
10484 			    mpt3sas_get_port_by_id(ioc, port_id, 0));
10485 			set_bit(handle, ioc->pd_handles);
10486 			retry_count = 0;
10487 			/* This will retry adding the end device.
10488 			 * _scsih_add_device() will decide on retries and
10489 			 * return "1" when it should be retried
10490 			 */
10491 			while (_scsih_add_device(ioc, handle, retry_count++,
10492 			    1)) {
10493 				ssleep(1);
10494 			}
10495 			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10496 				 handle,
10497 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10498 		}
10499 	}
10500 
10501 	ioc_info(ioc, "\tscan devices: phys disk complete\n");
10502 
10503 	ioc_info(ioc, "\tscan devices: volumes start\n");
10504 
10505 	/* volumes */
10506 	handle = 0xFFFF;
10507 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10508 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10509 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10510 		    MPI2_IOCSTATUS_MASK;
10511 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10512 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10513 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10514 			break;
10515 		}
10516 		handle = le16_to_cpu(volume_pg1.DevHandle);
10517 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
10518 		raid_device = _scsih_raid_device_find_by_wwid(ioc,
10519 		    le64_to_cpu(volume_pg1.WWID));
10520 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10521 		if (raid_device)
10522 			continue;
10523 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10524 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10525 		     sizeof(Mpi2RaidVolPage0_t)))
10526 			continue;
10527 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10528 		    MPI2_IOCSTATUS_MASK;
10529 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10530 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10531 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10532 			break;
10533 		}
10534 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10535 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10536 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10537 			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10538 			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10539 			element.VolDevHandle = volume_pg1.DevHandle;
10540 			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10541 				 volume_pg1.DevHandle);
10542 			_scsih_sas_volume_add(ioc, &element);
10543 			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10544 				 volume_pg1.DevHandle);
10545 		}
10546 	}
10547 
10548 	ioc_info(ioc, "\tscan devices: volumes complete\n");
10549 
10550  skip_to_sas:
10551 
10552 	ioc_info(ioc, "\tscan devices: end devices start\n");
10553 
10554 	/* sas devices */
10555 	handle = 0xFFFF;
10556 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10557 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10558 	    handle))) {
10559 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10560 		    MPI2_IOCSTATUS_MASK;
10561 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10562 			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10563 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10564 			break;
10565 		}
10566 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
10567 		if (!(_scsih_is_end_device(
10568 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
10569 			continue;
10570 		port_id = sas_device_pg0.PhysicalPort;
10571 		sas_device = mpt3sas_get_sdev_by_addr(ioc,
10572 		    le64_to_cpu(sas_device_pg0.SASAddress),
10573 		    mpt3sas_get_port_by_id(ioc, port_id, 0));
10574 		if (sas_device) {
10575 			sas_device_put(sas_device);
10576 			continue;
10577 		}
10578 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10579 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10580 			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10581 				 handle,
10582 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10583 			mpt3sas_transport_update_links(ioc, sas_address, handle,
10584 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10585 			    mpt3sas_get_port_by_id(ioc, port_id, 0));
10586 			retry_count = 0;
10587 			/* This will retry adding the end device.
10588 			 * _scsih_add_device() will decide on retries and
10589 			 * return "1" when it should be retried
10590 			 */
10591 			while (_scsih_add_device(ioc, handle, retry_count++,
10592 			    0)) {
10593 				ssleep(1);
10594 			}
10595 			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10596 				 handle,
10597 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10598 		}
10599 	}
10600 	ioc_info(ioc, "\tscan devices: end devices complete\n");
10601 	ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10602 
10603 	/* pcie devices */
10604 	handle = 0xFFFF;
10605 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10606 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10607 		handle))) {
10608 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10609 				& MPI2_IOCSTATUS_MASK;
10610 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10611 			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10612 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10613 			break;
10614 		}
10615 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10616 		if (!(_scsih_is_nvme_pciescsi_device(
10617 			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10618 			continue;
10619 		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10620 				le64_to_cpu(pcie_device_pg0.WWID));
10621 		if (pcie_device) {
10622 			pcie_device_put(pcie_device);
10623 			continue;
10624 		}
10625 		retry_count = 0;
10626 		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10627 		_scsih_pcie_add_device(ioc, handle);
10628 
10629 		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10630 			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10631 	}
10632 
10633 	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10634 	ioc_info(ioc, "scan devices: complete\n");
10635 }
10636 
/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	/* No pre-reset teardown is needed here; just trace the callback. */
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
10647 
/**
 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
 *							scsi & tm cmds.
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
	/* Abort an in-flight internal driver command: flag it as reset,
	 * free its smid and wake whoever is waiting for completion.
	 */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	/* Same treatment for an outstanding task-management command. */
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	/* Drop pending device add/remove bookkeeping; the post-reset
	 * rescan rebuilds this state from scratch.
	 */
	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	       ioc->device_remove_in_progress_sz);
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}
10677 
10678 /**
10679  * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10680  * @ioc: per adapter object
10681  *
10682  * The handler for doing any required cleanup or initialization.
10683  */
10684 void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER * ioc)10685 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10686 {
10687 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
10688 	if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
10689 		if (ioc->multipath_on_hba) {
10690 			_scsih_sas_port_refresh(ioc);
10691 			_scsih_update_vphys_after_reset(ioc);
10692 		}
10693 		_scsih_prep_device_scan(ioc);
10694 		_scsih_create_enclosure_list_after_reset(ioc);
10695 		_scsih_search_responding_sas_devices(ioc);
10696 		_scsih_search_responding_pcie_devices(ioc);
10697 		_scsih_search_responding_raid_devices(ioc);
10698 		_scsih_search_responding_expanders(ioc);
10699 		_scsih_error_recovery_delete_devices(ioc);
10700 	}
10701 }
10702 
10703 /**
10704  * _mpt3sas_fw_work - delayed task for processing firmware events
10705  * @ioc: per adapter object
10706  * @fw_event: The fw_event_work object
10707  * Context: user.
10708  */
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	/* Track the event currently being processed; cleared on exit. */
	ioc->current_event = fw_event;
	_scsih_fw_event_del_from_list(ioc, fw_event);

	/* the queue is being flushed so ignore this event */
	if (ioc->remove_host || ioc->pci_error_recovery) {
		fw_event_work_put(fw_event);
		ioc->current_event = NULL;
		return;
	}

	switch (fw_event->event) {
	case MPT3SAS_PROCESS_TRIGGER_DIAG:
		mpt3sas_process_trigger_data(ioc,
			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
			fw_event->event_data);
		break;
	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
		/*
		 * Wait for host reset / error handling to finish before
		 * walking and pruning the device lists.
		 */
		while (scsi_host_in_recovery(ioc->shost) ||
					 ioc->shost_recovery) {
			/*
			 * If we're unloading or cancelling the work, bail.
			 * Otherwise, this can become an infinite loop.
			 */
			if (ioc->remove_host || ioc->fw_events_cleanup)
				goto out;
			ssleep(1);
		}
		_scsih_remove_unresponding_devices(ioc);
		_scsih_del_dirty_vphy(ioc);
		_scsih_del_dirty_port_entries(ioc);
		if (ioc->is_gen35_ioc)
			_scsih_update_device_qdepth(ioc);
		_scsih_scan_for_devices_after_reset(ioc);
		/*
		 * If diag reset has occurred during the driver load
		 * then driver has to complete the driver load operation
		 * by executing the following items:
		 *- Register the devices from sas_device_init_list to SML
		 *- clear is_driver_loading flag,
		 *- start the watchdog thread.
		 * In happy driver load path, above things are taken care of when
		 * driver executes scsih_scan_finished().
		 */
		if (ioc->is_driver_loading)
			_scsih_complete_devices_scanning(ioc);
		_scsih_set_nvme_max_shutdown_latency(ioc);
		break;
	case MPT3SAS_PORT_ENABLE_COMPLETE:
		ioc->start_scan = 0;
		/* apply the user-requested missing-delay, if configured */
		if (missing_delay[0] != -1 && missing_delay[1] != -1)
			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
			    missing_delay[1]);
		dewtprintk(ioc,
			   ioc_info(ioc, "port enable: complete from worker thread\n"));
		break;
	case MPT3SAS_TURN_ON_PFA_LED:
		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_sas_topology_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		/* debug-only path: the event was already handled at ISR time */
		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
			_scsih_sas_device_status_change_event_debug(ioc,
			    (Mpi2EventDataSasDeviceStatusChange_t *)
			    fw_event->event_data);
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
		_scsih_sas_discovery_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		_scsih_sas_device_discovery_error_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		_scsih_sas_enclosure_dev_status_change_event(ioc,
		    fw_event);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_sas_ir_config_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_sas_ir_volume_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		_scsih_sas_ir_operation_status_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		_scsih_pcie_device_status_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
		_scsih_pcie_enumeration_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_pcie_topology_change_event(ioc, fw_event);
		break;
	}
out:
	/* drop this work item's reference and clear the in-flight marker */
	fw_event_work_put(fw_event);
	ioc->current_event = NULL;
}
10818 
10819 /**
10820  * _firmware_event_work
10821  * @work: The fw_event_work object
10822  * Context: user.
10823  *
10824  * wrappers for the work thread handling firmware events
10825  */
10826 
10827 static void
_firmware_event_work(struct work_struct * work)10828 _firmware_event_work(struct work_struct *work)
10829 {
10830 	struct fw_event_work *fw_event = container_of(work,
10831 	    struct fw_event_work, work);
10832 
10833 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
10834 }
10835 
10836 /**
10837  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10838  * @ioc: per adapter object
10839  * @msix_index: MSIX table index supplied by the OS
10840  * @reply: reply message frame(lower 32bit addr)
10841  * Context: interrupt.
10842  *
10843  * This function merely adds a new work task into ioc->firmware_event_thread.
10844  * The tasks are worked from _firmware_event_work in user context.
10845  *
10846  * Return: 1 meaning mf should be freed from _base_interrupt
10847  *         0 means the mf is freed from this function.
10848  */
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	struct fw_event_work *fw_event;
	Mpi2EventNotificationReply_t *mpi_reply;
	u16 event;
	u16 sz;
	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;

	/* events turned off due to host reset */
	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	event = le16_to_cpu(mpi_reply->Event);

	/* feed every event except LOG_ENTRY_ADDED to the diag trigger logic */
	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
		mpt3sas_trigger_event(ioc, event, 0);

	switch (event) {
	/* handle these */
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
		    (Mpi2EventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;

		if (baen_data->Primitive !=
		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;

		/*
		 * Only one broadcast AEN is processed at a time; coalesce
		 * any further ones into a pending count.
		 */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		} else
			ioc->broadcast_aen_busy = 1;
		break;
	}

	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_check_topo_delete_events(ioc,
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData);
		/*
		 * No need to add the topology change list
		 * event to fw event work queue when
		 * diag reset is going on. Since during diag
		 * reset driver scan the devices by reading
		 * sas device page0's not by processing the
		 * events.
		 */
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	_scsih_check_pcie_topo_remove_events(ioc,
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    mpi_reply->EventData);
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_check_ir_config_unhide_events(ioc,
		    (Mpi2EventDataIrConfigChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_check_volume_delete_events(ioc,
		    (Mpi2EventDataIrVolume_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 log_code;

		/* log entries are only decoded on WarpDrive controllers */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		switch (log_code) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc,
		    (Mpi2EventDataSasDeviceStatusChange_t *)
		    mpi_reply->EventData);
		break;
	/* these events are deferred entirely to the worker thread */
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
			(Mpi2EventDataTemperature_t *)
			mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
			    le32_to_cpu(
			    ActiveCableEventData->ActiveCablePowerRequirement));
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* queue the event (with a copy of its payload) to the worker thread */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	/* the event list now holds its own reference */
	fw_event_work_put(fw_event);
	return 1;
}
11025 
11026 /**
11027  * _scsih_expander_node_remove - removing expander device from list.
11028  * @ioc: per adapter object
11029  * @sas_expander: the sas_device object
11030  *
11031  * Removing object and freeing associated memory from the
11032  * ioc->sas_expander_list.
11033  */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;
	int port_id;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	   &sas_expander->sas_port_list, port_list) {
		/* bail out mid-walk if a host reset kicked in */
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			/* child expanders are removed recursively */
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	/* cache the port id for the log message below */
	port_id = sas_expander->port->port_id;

	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent, sas_expander->port);

	ioc_info(ioc,
	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
	    sas_expander->handle, (unsigned long long)
	    sas_expander->sas_address,
	    port_id);

	/* unlink from ioc->sas_expander_list before freeing */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}
11079 
11080 /**
11081  * _scsih_nvme_shutdown - NVMe shutdown notification
11082  * @ioc: per adapter object
11083  *
11084  * Sending IoUnitControl request with shutdown operation code to alert IOC that
11085  * the host system is shutting down so that IOC can issue NVMe shutdown to
11086  * NVMe drives attached to it.
11087  */
static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;

	/* are there any NVMe devices ? */
	if (list_empty(&ioc->pcie_device_list))
		return;

	/* scsih_cmds is a single shared command slot; serialize access */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc,
		    "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* build the IO Unit Control SHUTDOWN request in the message frame */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for max_shutdown_latency seconds */
	ioc_info(ioc,
		"Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
		ioc->max_shutdown_latency);
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
			ioc->max_shutdown_latency*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_info(ioc, "Io Unit Control shutdown (complete):"
			"ioc_status(0x%04x), loginfo(0x%08x)\n",
			le16_to_cpu(mpi_reply->IOCStatus),
			le32_to_cpu(mpi_reply->IOCLogInfo));
	}
 out:
	/* release the shared command slot in every exit path */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
11147 
11148 
11149 /**
11150  * _scsih_ir_shutdown - IR shutdown notification
11151  * @ioc: per adapter object
11152  *
11153  * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
11154  * the host system is shutting down.
11155  */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	/* scsih_cmds is a single shared command slot; serialize access */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* build the RAID Action SYSTEM_SHUTDOWN_INITIATED request */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* give the firmware up to 10 seconds to acknowledge */
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				 le16_to_cpu(mpi_reply->IOCStatus),
				 le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	/* release the shared command slot in every exit path */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
11216 
11217 /**
11218  * _scsih_get_shost_and_ioc - get shost and ioc
11219  *			and verify whether they are NULL or not
11220  * @pdev: PCI device struct
11221  * @shost: address of scsi host pointer
11222  * @ioc: address of HBA adapter pointer
11223  *
11224  * Return zero if *shost and *ioc are not NULL otherwise return error number.
11225  */
11226 static int
_scsih_get_shost_and_ioc(struct pci_dev * pdev,struct Scsi_Host ** shost,struct MPT3SAS_ADAPTER ** ioc)11227 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11228 	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11229 {
11230 	*shost = pci_get_drvdata(pdev);
11231 	if (*shost == NULL) {
11232 		dev_err(&pdev->dev, "pdev's driver data is null\n");
11233 		return -ENXIO;
11234 	}
11235 
11236 	*ioc = shost_priv(*shost);
11237 	if (*ioc == NULL) {
11238 		dev_err(&pdev->dev, "shost's private data is null\n");
11239 		return -ENXIO;
11240 	}
11241 
11242 	return 0;
11243 }
11244 
11245 /**
11246  * scsih_remove - detach and remove add host
11247  * @pdev: PCI device struct
11248  *
11249  * Routine called when unloading the driver.
11250  */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct	*wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;
	struct hba_port *port, *port_next;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/*
	 * Surprise removal: the device is gone, so flush all outstanding
	 * commands back to the midlayer instead of waiting for completions.
	 */
	if (!pci_device_is_present(pdev)) {
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
	}

	_scsih_fw_event_cleanup_queue(ioc);

	/*
	 * Detach the firmware-event workqueue under the lock so no new
	 * events can be queued, then destroy it.
	 */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1. so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
				&ioc->ioc_pg1_copy);
	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	mpt3sas_destroy_debugfs(ioc);
	sas_remove_host(shost);
	/* unregister and free every RAID volume */
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			/* mark deleted so in-flight paths skip the target */
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	/* unregister and drop every NVMe/PCIe device */
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
		list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	   &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	/* free the hba_port table */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		list_del(&port->list);
		kfree(port);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	/* tear down base/ctl layers, unlink the ioc and drop the host */
	mpt3sas_base_detach(ioc);
	mpt3sas_ctl_release(ioc);
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}
11349 
11350 /**
11351  * scsih_shutdown - routine call during system shutdown
11352  * @pdev: PCI device struct
11353  */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct	*wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/*
	 * Surprise removal: the device is gone, so flush all outstanding
	 * commands back to the midlayer instead of waiting for completions.
	 */
	if (!pci_device_is_present(pdev)) {
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
	}

	_scsih_fw_event_cleanup_queue(ioc);

	/*
	 * Detach the firmware-event workqueue under the lock so no new
	 * events can be queued, then destroy it.
	 */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
				&ioc->ioc_pg1_copy);

	/* alert IR volumes and NVMe drives that the system is going down */
	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_stop_watchdog(ioc);
	/* mark host recovery around the soft reset */
	ioc->shost_recovery = 1;
	mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
	ioc->shost_recovery = 0;
	mpt3sas_base_free_irq(ioc);
	mpt3sas_base_disable_msix(ioc);
}
11399 
11400 
11401 /**
11402  * _scsih_probe_boot_devices - reports 1st device
11403  * @ioc: per adapter object
11404  *
11405  * If specified in bios page 2, this routine reports the 1st
11406  * device scsi-ml or sas transport for persistent boot device
11407  * purposes.  Please refer to function _scsih_determine_boot_device()
11408  */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;
	struct hba_port *port;

	 /* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/*
	 * Pick the boot device in priority order: requested boot device,
	 * requested alternate, then current boot device.
	 */
	device = NULL;
	if (ioc->req_boot_device.device) {
		device =  ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device =  ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device =  ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	/* the channel determines which device type `device` points at */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		/*
		 * If this boot vd is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (raid_device->starget)
			return;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		/*
		 * If this boot NVMe device is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (pcie_device->starget)
			return;
		/* promote the device to the active list under the lock */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		sas_device = device;
		/*
		 * If this boot sas/sata device is already registered with SML
		 * then no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (sas_device->starget)
			return;
		/* snapshot addressing info and move to the active list */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		if (ioc->hide_drives)
			return;

		if (!port)
			return;

		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent, port)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			/* port added but no target bound: undo unless the
			 * driver is still loading */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent, port);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}
11509 
11510 /**
11511  * _scsih_probe_raid - reporting raid volumes to scsi-ml
11512  * @ioc: per adapter object
11513  *
11514  * Called during initial loading of the driver.
11515  */
11516 static void
_scsih_probe_raid(struct MPT3SAS_ADAPTER * ioc)11517 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11518 {
11519 	struct _raid_device *raid_device, *raid_next;
11520 	int rc;
11521 
11522 	list_for_each_entry_safe(raid_device, raid_next,
11523 	    &ioc->raid_device_list, list) {
11524 		if (raid_device->starget)
11525 			continue;
11526 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11527 		    raid_device->id, 0);
11528 		if (rc)
11529 			_scsih_raid_device_remove(ioc, raid_device);
11530 	}
11531 }
11532 
get_next_sas_device(struct MPT3SAS_ADAPTER * ioc)11533 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11534 {
11535 	struct _sas_device *sas_device = NULL;
11536 	unsigned long flags;
11537 
11538 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
11539 	if (!list_empty(&ioc->sas_device_init_list)) {
11540 		sas_device = list_first_entry(&ioc->sas_device_init_list,
11541 				struct _sas_device, list);
11542 		sas_device_get(sas_device);
11543 	}
11544 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11545 
11546 	return sas_device;
11547 }
11548 
static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
		struct _sas_device *sas_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Since we dropped the lock during the call to port_add(), we need to
	 * be careful here that somebody else didn't move or delete this item
	 * while we were busy with other things.
	 *
	 * If it was on the list, we need a put() for the reference the list
	 * had. Either way, we need a get() for the destination list.
	 */
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* take the destination list's reference before linking */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
11574 
11575 /**
11576  * _scsih_probe_sas - reporting sas devices to sas transport
11577  * @ioc: per adapter object
11578  *
11579  * Called during initial loading of the driver.
11580  */
static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device;

	if (ioc->hide_drives)
		return;

	/* drain sas_device_init_list, registering each device with the
	 * sas transport and moving it to the active list */
	while ((sas_device = get_next_sas_device(ioc))) {
		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
		    sas_device->sas_address_parent, sas_device->port)) {
			_scsih_sas_device_remove(ioc, sas_device);
			sas_device_put(sas_device);
			continue;
		} else if (!sas_device->starget) {
			/*
			 * When asyn scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_device->sas_address,
				    sas_device->sas_address_parent,
				    sas_device->port);
				_scsih_sas_device_remove(ioc, sas_device);
				sas_device_put(sas_device);
				continue;
			}
		}
		sas_device_make_active(ioc, sas_device);
		/* drop the reference taken by get_next_sas_device() */
		sas_device_put(sas_device);
	}
}
11616 
11617 /**
11618  * get_next_pcie_device - Get the next pcie device
11619  * @ioc: per adapter object
11620  *
11621  * Get the next pcie device from pcie_device_init_list list.
11622  *
11623  * Return: pcie device structure if pcie_device_init_list list is not empty
11624  * otherwise returns NULL
11625  */
get_next_pcie_device(struct MPT3SAS_ADAPTER * ioc)11626 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11627 {
11628 	struct _pcie_device *pcie_device = NULL;
11629 	unsigned long flags;
11630 
11631 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11632 	if (!list_empty(&ioc->pcie_device_init_list)) {
11633 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11634 				struct _pcie_device, list);
11635 		pcie_device_get(pcie_device);
11636 	}
11637 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11638 
11639 	return pcie_device;
11640 }
11641 
11642 /**
11643  * pcie_device_make_active - Add pcie device to pcie_device_list list
11644  * @ioc: per adapter object
11645  * @pcie_device: pcie device object
11646  *
11647  * Add the pcie device which has registered with SCSI Transport Layer to
11648  * pcie_device_list list
11649  */
pcie_device_make_active(struct MPT3SAS_ADAPTER * ioc,struct _pcie_device * pcie_device)11650 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11651 		struct _pcie_device *pcie_device)
11652 {
11653 	unsigned long flags;
11654 
11655 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11656 
11657 	if (!list_empty(&pcie_device->list)) {
11658 		list_del_init(&pcie_device->list);
11659 		pcie_device_put(pcie_device);
11660 	}
11661 	pcie_device_get(pcie_device);
11662 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11663 
11664 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11665 }
11666 
11667 /**
11668  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11669  * @ioc: per adapter object
11670  *
11671  * Called during initial loading of the driver.
11672  */
static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	int rc;

	/* PCIe Device List */
	while ((pcie_device = get_next_pcie_device(ioc))) {
		/* already registered with the SCSI midlayer: just drop our ref */
		if (pcie_device->starget) {
			pcie_device_put(pcie_device);
			continue;
		}
		/*
		 * Blocked devices are kept on the active list but not exposed
		 * to the SCSI midlayer.
		 */
		if (pcie_device->access_status ==
		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
			pcie_device_make_active(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		}
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
			pcie_device->id, 0);
		if (rc) {
			/* midlayer rejected the device: discard it */
			_scsih_pcie_device_remove(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		} else if (!pcie_device->starget) {
			/*
			 * When async scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
			/* TODO-- Need to find out whether this condition will
			 * occur or not
			 */
				_scsih_pcie_device_remove(ioc, pcie_device);
				pcie_device_put(pcie_device);
				continue;
			}
		}
		/* success: move the device onto the active list */
		pcie_device_make_active(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}
}
11717 
11718 /**
11719  * _scsih_probe_devices - probing for devices
11720  * @ioc: per adapter object
11721  *
11722  * Called during initial loading of the driver.
11723  */
static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u16 volume_mapping_flags;

	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
		return;  /* return when IOC doesn't support initiator mode */

	/* devices listed in BIOS page 2 are reported first */
	_scsih_probe_boot_devices(ioc);

	if (ioc->ir_firmware) {
		/*
		 * The IR volume mapping mode decides whether RAID volumes
		 * are reported to the midlayer before or after bare SAS
		 * devices, so that device IDs stay stable.
		 */
		volume_mapping_flags =
		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
		if (volume_mapping_flags ==
		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
			_scsih_probe_raid(ioc);
			_scsih_probe_sas(ioc);
		} else {
			_scsih_probe_sas(ioc);
			_scsih_probe_raid(ioc);
		}
	} else {
		/*
		 * NOTE(review): PCIe (NVMe) devices are only probed on the
		 * non-IR path — presumably IR firmware controllers have no
		 * NVMe support; confirm against the firmware feature set.
		 */
		_scsih_probe_sas(ioc);
		_scsih_probe_pcie(ioc);
	}
}
11751 
11752 /**
11753  * scsih_scan_start - scsi lld callback for .scan_start
11754  * @shost: SCSI host pointer
11755  *
11756  * The shost has the ability to discover targets on its own instead
11757  * of scanning the entire bus.  In our implementation, we will kick off
11758  * firmware discovery.
11759  */
11760 static void
scsih_scan_start(struct Scsi_Host * shost)11761 scsih_scan_start(struct Scsi_Host *shost)
11762 {
11763 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11764 	int rc;
11765 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11766 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11767 	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11768 		mpt3sas_enable_diag_buffer(ioc, 1);
11769 
11770 	if (disable_discovery > 0)
11771 		return;
11772 
11773 	ioc->start_scan = 1;
11774 	rc = mpt3sas_port_enable(ioc);
11775 
11776 	if (rc != 0)
11777 		ioc_info(ioc, "port enable: FAILED\n");
11778 }
11779 
11780 /**
11781  * _scsih_complete_devices_scanning - add the devices to sml and
11782  * complete ioc initialization.
11783  * @ioc: per adapter object
11784  *
11785  * Return nothing.
11786  */
_scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER * ioc)11787 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
11788 {
11789 
11790 	if (ioc->wait_for_discovery_to_complete) {
11791 		ioc->wait_for_discovery_to_complete = 0;
11792 		_scsih_probe_devices(ioc);
11793 	}
11794 
11795 	mpt3sas_base_start_watchdog(ioc);
11796 	ioc->is_driver_loading = 0;
11797 }
11798 
11799 /**
11800  * scsih_scan_finished - scsi lld callback for .scan_finished
11801  * @shost: SCSI host pointer
11802  * @time: elapsed time of the scan in jiffies
11803  *
11804  * This function will be called periodically until it returns 1 with the
11805  * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
11806  * we wait for firmware discovery to complete, then return 1.
11807  */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	u32 ioc_state;
	int issue_hard_reset = 0;

	/* discovery disabled by module parameter: report scan complete */
	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* give firmware port enable at most 300 seconds */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/* port enable still outstanding: poll IOC health, else keep waiting */
	if (ioc->start_scan) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			issue_hard_reset = 1;
			goto out;
		} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
				MPI2_IOC_STATE_COREDUMP) {
			mpt3sas_base_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
			issue_hard_reset = 1;
			goto out;
		}
		return 0;	/* not done yet; midlayer will poll again */
	}

	if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
		ioc_info(ioc,
		    "port enable: aborted due to diag reset\n");
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
			 ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		/* unusable controller: schedule host removal */
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	_scsih_complete_devices_scanning(ioc);

 out:
	if (issue_hard_reset) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
			ioc->is_driver_loading = 0;
	}
	return 1;
}
11873 
11874 /**
11875  * scsih_map_queues - map reply queues with request queues
11876  * @shost: SCSI host pointer
11877  */
static void scsih_map_queues(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc =
	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
	struct blk_mq_queue_map *map;
	int i, qoff, offset;
	/* interrupt-driven (MSI-X backed) reply queues precede poll queues */
	int nr_msix_vectors = ioc->iopoll_q_start_index;
	int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;

	/* single hw queue: blk-mq's default mapping is already correct */
	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		map = &shost->tag_set.map[i];
		map->nr_queues = 0;
		offset = 0;
		if (i == HCTX_TYPE_DEFAULT) {
			/* high-iops queues are not exposed as hw queues */
			map->nr_queues =
			    nr_msix_vectors - ioc->high_iops_queues;
			offset = ioc->high_iops_queues;
		} else if (i == HCTX_TYPE_POLL)
			map->nr_queues = iopoll_q_count;

		/* the default map must never be left empty */
		if (!map->nr_queues)
			BUG_ON(i == HCTX_TYPE_DEFAULT);

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
		else
			blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}
11917 
11918 /* shost template for SAS 2.0 HBA devices */
static const struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.sdev_init			= scsih_sdev_init,
	.sdev_configure			= scsih_sdev_configure,
	.target_destroy			= scsih_target_destroy,
	.sdev_destroy			= scsih_sdev_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* NOTE(review): placeholder — presumably raised from IOC facts
	 * during attach; confirm in mpt3sas_base_attach()
	 */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};
11947 
11948 /* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	/* cookie must match the host template passed to scsi_host_alloc() */
	.cookie		= &mpt2sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
11955 
11956 /* shost template for SAS 3.0 HBA devices */
static const struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.sdev_init			= scsih_sdev_init,
	.sdev_configure			= scsih_sdev_configure,
	.target_destroy			= scsih_target_destroy,
	.sdev_destroy			= scsih_sdev_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* NOTE(review): placeholder — presumably raised from IOC facts
	 * during attach; confirm in mpt3sas_base_attach()
	 */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.max_segment_size		= 0xffffffff,
	.cmd_per_lun			= 128,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
	/* blk-mq multi-queue / polled I/O support (SAS 3.0+ only) */
	.map_queues			= scsih_map_queues,
	.mq_poll			= mpt3sas_blk_mq_poll,
};
11988 
11989 /* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	/* cookie must match the host template passed to scsi_host_alloc() */
	.cookie		= &mpt3sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
11996 
11997 /**
11998  * _scsih_determine_hba_mpi_version - determine in which MPI version class
11999  *					this device belongs to.
12000  * @pdev: PCI device struct
12001  *
12002  * return MPI2_VERSION for SAS 2.0 HBA devices,
12003  *	MPI25_VERSION for SAS 3.0 HBA devices, and
12004  *	MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
12005  */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{

	switch (pdev->device) {
	/* SAS 2.0 generation (incl. WarpDrive and switch MPI endpoints) */
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	/* SAS 3.0 generation */
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	/* SAS 3.5 generation (Cutlass/Invader/Ventura/Aero families) */
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	/* unrecognized device: caller treats 0 as "not ours" */
	return 0;
}
12068 
12069 /**
12070  * _scsih_probe - attach and add scsi host
12071  * @pdev: PCI device struct
12072  * @id: pci device id
12073  *
12074  * Return: 0 success, anything else error.
12075  */
12076 static int
_scsih_probe(struct pci_dev * pdev,const struct pci_device_id * id)12077 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
12078 {
12079 	struct MPT3SAS_ADAPTER *ioc;
12080 	struct Scsi_Host *shost = NULL;
12081 	int rv;
12082 	u16 hba_mpi_version;
12083 	int iopoll_q_count = 0;
12084 
12085 	/* Determine in which MPI version class this pci device belongs */
12086 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
12087 	if (hba_mpi_version == 0)
12088 		return -ENODEV;
12089 
12090 	/* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
12091 	 * for other generation HBA's return with -ENODEV
12092 	 */
12093 	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
12094 		return -ENODEV;
12095 
12096 	/* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
12097 	 * for other generation HBA's return with -ENODEV
12098 	 */
12099 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
12100 		|| hba_mpi_version ==  MPI26_VERSION)))
12101 		return -ENODEV;
12102 
12103 	switch (hba_mpi_version) {
12104 	case MPI2_VERSION:
12105 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
12106 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
12107 		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
12108 		shost = scsi_host_alloc(&mpt2sas_driver_template,
12109 		  sizeof(struct MPT3SAS_ADAPTER));
12110 		if (!shost)
12111 			return -ENODEV;
12112 		ioc = shost_priv(shost);
12113 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12114 		ioc->hba_mpi_version_belonged = hba_mpi_version;
12115 		ioc->id = mpt2_ids++;
12116 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
12117 		switch (pdev->device) {
12118 		case MPI2_MFGPAGE_DEVID_SSS6200:
12119 			ioc->is_warpdrive = 1;
12120 			ioc->hide_ir_msg = 1;
12121 			break;
12122 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12123 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12124 			ioc->is_mcpu_endpoint = 1;
12125 			break;
12126 		default:
12127 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
12128 			break;
12129 		}
12130 
12131 		if (multipath_on_hba == -1 || multipath_on_hba == 0)
12132 			ioc->multipath_on_hba = 0;
12133 		else
12134 			ioc->multipath_on_hba = 1;
12135 
12136 		break;
12137 	case MPI25_VERSION:
12138 	case MPI26_VERSION:
12139 		/* Use mpt3sas driver host template for SAS 3.0 HBA's */
12140 		shost = scsi_host_alloc(&mpt3sas_driver_template,
12141 		  sizeof(struct MPT3SAS_ADAPTER));
12142 		if (!shost)
12143 			return -ENODEV;
12144 		ioc = shost_priv(shost);
12145 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12146 		ioc->hba_mpi_version_belonged = hba_mpi_version;
12147 		ioc->id = mpt3_ids++;
12148 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
12149 		switch (pdev->device) {
12150 		case MPI26_MFGPAGE_DEVID_SAS3508:
12151 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
12152 		case MPI26_MFGPAGE_DEVID_SAS3408:
12153 		case MPI26_MFGPAGE_DEVID_SAS3516:
12154 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
12155 		case MPI26_MFGPAGE_DEVID_SAS3416:
12156 		case MPI26_MFGPAGE_DEVID_SAS3616:
12157 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12158 			ioc->is_gen35_ioc = 1;
12159 			break;
12160 		case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12161 		case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12162 			dev_err(&pdev->dev,
12163 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
12164 			    pdev->device, pdev->subsystem_vendor,
12165 			    pdev->subsystem_device);
12166 			return 1;
12167 		case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12168 		case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12169 			dev_err(&pdev->dev,
12170 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
12171 			    pdev->device, pdev->subsystem_vendor,
12172 			    pdev->subsystem_device);
12173 			return 1;
12174 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12175 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12176 			dev_info(&pdev->dev,
12177 			    "HBA is in Configurable Secure mode\n");
12178 			fallthrough;
12179 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12180 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12181 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
12182 			break;
12183 		default:
12184 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
12185 		}
12186 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
12187 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
12188 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
12189 			ioc->combined_reply_queue = 1;
12190 			if (ioc->is_gen35_ioc)
12191 				ioc->combined_reply_index_count =
12192 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
12193 			else
12194 				ioc->combined_reply_index_count =
12195 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
12196 		}
12197 
12198 		switch (ioc->is_gen35_ioc) {
12199 		case 0:
12200 			if (multipath_on_hba == -1 || multipath_on_hba == 0)
12201 				ioc->multipath_on_hba = 0;
12202 			else
12203 				ioc->multipath_on_hba = 1;
12204 			break;
12205 		case 1:
12206 			if (multipath_on_hba == -1 || multipath_on_hba > 0)
12207 				ioc->multipath_on_hba = 1;
12208 			else
12209 				ioc->multipath_on_hba = 0;
12210 			break;
12211 		default:
12212 			break;
12213 		}
12214 
12215 		break;
12216 	default:
12217 		return -ENODEV;
12218 	}
12219 
12220 	INIT_LIST_HEAD(&ioc->list);
12221 	spin_lock(&gioc_lock);
12222 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
12223 	spin_unlock(&gioc_lock);
12224 	ioc->shost = shost;
12225 	ioc->pdev = pdev;
12226 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
12227 	ioc->tm_cb_idx = tm_cb_idx;
12228 	ioc->ctl_cb_idx = ctl_cb_idx;
12229 	ioc->base_cb_idx = base_cb_idx;
12230 	ioc->port_enable_cb_idx = port_enable_cb_idx;
12231 	ioc->transport_cb_idx = transport_cb_idx;
12232 	ioc->scsih_cb_idx = scsih_cb_idx;
12233 	ioc->config_cb_idx = config_cb_idx;
12234 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
12235 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
12236 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
12237 	ioc->logging_level = logging_level;
12238 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
12239 	/* Host waits for minimum of six seconds */
12240 	ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
12241 	/*
12242 	 * Enable MEMORY MOVE support flag.
12243 	 */
12244 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
12245 	/* Enable ADDITIONAL QUERY support flag. */
12246 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
12247 
12248 	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
12249 
12250 	/* misc semaphores and spin locks */
12251 	mutex_init(&ioc->reset_in_progress_mutex);
12252 	mutex_init(&ioc->hostdiag_unlock_mutex);
12253 	/* initializing pci_access_mutex lock */
12254 	mutex_init(&ioc->pci_access_mutex);
12255 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
12256 	spin_lock_init(&ioc->scsi_lookup_lock);
12257 	spin_lock_init(&ioc->sas_device_lock);
12258 	spin_lock_init(&ioc->sas_node_lock);
12259 	spin_lock_init(&ioc->fw_event_lock);
12260 	spin_lock_init(&ioc->raid_device_lock);
12261 	spin_lock_init(&ioc->pcie_device_lock);
12262 	spin_lock_init(&ioc->diag_trigger_lock);
12263 
12264 	INIT_LIST_HEAD(&ioc->sas_device_list);
12265 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
12266 	INIT_LIST_HEAD(&ioc->sas_expander_list);
12267 	INIT_LIST_HEAD(&ioc->enclosure_list);
12268 	INIT_LIST_HEAD(&ioc->pcie_device_list);
12269 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
12270 	INIT_LIST_HEAD(&ioc->fw_event_list);
12271 	INIT_LIST_HEAD(&ioc->raid_device_list);
12272 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
12273 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
12274 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
12275 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
12276 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12277 	INIT_LIST_HEAD(&ioc->reply_queue_list);
12278 	INIT_LIST_HEAD(&ioc->port_table_list);
12279 
12280 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12281 
12282 	/* init shost parameters */
12283 	shost->max_cmd_len = 32;
12284 	shost->max_lun = max_lun;
12285 	shost->transportt = mpt3sas_transport_template;
12286 	shost->unique_id = ioc->id;
12287 
12288 	if (ioc->is_mcpu_endpoint) {
12289 		/* mCPU MPI support 64K max IO */
12290 		shost->max_sectors = 128;
12291 		ioc_info(ioc, "The max_sectors value is set to %d\n",
12292 			 shost->max_sectors);
12293 	} else {
12294 		if (max_sectors != 0xFFFF) {
12295 			if (max_sectors < 64) {
12296 				shost->max_sectors = 64;
12297 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12298 					 max_sectors);
12299 			} else if (max_sectors > 32767) {
12300 				shost->max_sectors = 32767;
12301 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12302 					 max_sectors);
12303 			} else {
12304 				shost->max_sectors = max_sectors & 0xFFFE;
12305 				ioc_info(ioc, "The max_sectors value is set to %d\n",
12306 					 shost->max_sectors);
12307 			}
12308 		}
12309 	}
12310 	/* register EEDP capabilities with SCSI layer */
12311 	if (prot_mask >= 0)
12312 		scsi_host_set_prot(shost, (prot_mask & 0x07));
12313 	else
12314 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12315 				   | SHOST_DIF_TYPE2_PROTECTION
12316 				   | SHOST_DIF_TYPE3_PROTECTION);
12317 
12318 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12319 
12320 	/* event thread */
12321 	ioc->firmware_event_thread = alloc_ordered_workqueue(
12322 		"fw_event_%s%d", 0, ioc->driver_name, ioc->id);
12323 	if (!ioc->firmware_event_thread) {
12324 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12325 			__FILE__, __LINE__, __func__);
12326 		rv = -ENODEV;
12327 		goto out_thread_fail;
12328 	}
12329 
12330 	shost->host_tagset = 0;
12331 
12332 	if (ioc->is_gen35_ioc && host_tagset_enable)
12333 		shost->host_tagset = 1;
12334 
12335 	ioc->is_driver_loading = 1;
12336 	if ((mpt3sas_base_attach(ioc))) {
12337 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12338 			__FILE__, __LINE__, __func__);
12339 		rv = -ENODEV;
12340 		goto out_attach_fail;
12341 	}
12342 
12343 	if (ioc->is_warpdrive) {
12344 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
12345 			ioc->hide_drives = 0;
12346 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
12347 			ioc->hide_drives = 1;
12348 		else {
12349 			if (mpt3sas_get_num_volumes(ioc))
12350 				ioc->hide_drives = 1;
12351 			else
12352 				ioc->hide_drives = 0;
12353 		}
12354 	} else
12355 		ioc->hide_drives = 0;
12356 
12357 	shost->nr_hw_queues = 1;
12358 
12359 	if (shost->host_tagset) {
12360 		shost->nr_hw_queues =
12361 		    ioc->reply_queue_count - ioc->high_iops_queues;
12362 
12363 		iopoll_q_count =
12364 		    ioc->reply_queue_count - ioc->iopoll_q_start_index;
12365 
12366 		shost->nr_maps = iopoll_q_count ? 3 : 1;
12367 
12368 		dev_info(&ioc->pdev->dev,
12369 		    "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12370 		    shost->can_queue, shost->nr_hw_queues);
12371 	}
12372 
12373 	rv = scsi_add_host(shost, &pdev->dev);
12374 	if (rv) {
12375 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12376 			__FILE__, __LINE__, __func__);
12377 		goto out_add_shost_fail;
12378 	}
12379 
12380 	scsi_scan_host(shost);
12381 	mpt3sas_setup_debugfs(ioc);
12382 	return 0;
12383 out_add_shost_fail:
12384 	mpt3sas_base_detach(ioc);
12385  out_attach_fail:
12386 	destroy_workqueue(ioc->firmware_event_thread);
12387  out_thread_fail:
12388 	spin_lock(&gioc_lock);
12389 	list_del(&ioc->list);
12390 	spin_unlock(&gioc_lock);
12391 	scsi_host_put(shost);
12392 	return rv;
12393 }
12394 
12395 /**
12396  * scsih_suspend - power management suspend main entry point
12397  * @dev: Device struct
12398  *
12399  * Return: 0 success, anything else error.
12400  */
12401 static int __maybe_unused
scsih_suspend(struct device * dev)12402 scsih_suspend(struct device *dev)
12403 {
12404 	struct pci_dev *pdev = to_pci_dev(dev);
12405 	struct Scsi_Host *shost;
12406 	struct MPT3SAS_ADAPTER *ioc;
12407 	int rc;
12408 
12409 	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12410 	if (rc)
12411 		return rc;
12412 
12413 	mpt3sas_base_stop_watchdog(ioc);
12414 	scsi_block_requests(shost);
12415 	_scsih_nvme_shutdown(ioc);
12416 	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12417 		 pdev, pci_name(pdev));
12418 
12419 	mpt3sas_base_free_resources(ioc);
12420 	return 0;
12421 }
12422 
12423 /**
12424  * scsih_resume - power management resume main entry point
12425  * @dev: Device struct
12426  *
12427  * Return: 0 success, anything else error.
12428  */
12429 static int __maybe_unused
scsih_resume(struct device * dev)12430 scsih_resume(struct device *dev)
12431 {
12432 	struct pci_dev *pdev = to_pci_dev(dev);
12433 	struct Scsi_Host *shost;
12434 	struct MPT3SAS_ADAPTER *ioc;
12435 	pci_power_t device_state = pdev->current_state;
12436 	int r;
12437 
12438 	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12439 	if (r)
12440 		return r;
12441 
12442 	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12443 		 pdev, pci_name(pdev), device_state);
12444 
12445 	ioc->pdev = pdev;
12446 	r = mpt3sas_base_map_resources(ioc);
12447 	if (r)
12448 		return r;
12449 	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12450 	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12451 	scsi_unblock_requests(shost);
12452 	mpt3sas_base_start_watchdog(ioc);
12453 	return 0;
12454 }
12455 
12456 /**
12457  * scsih_pci_error_detected - Called when a PCI error is detected.
12458  * @pdev: PCI device struct
12459  * @state: PCI channel state
12460  *
12461  * Description: Called when a PCI error is detected.
12462  *
12463  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12464  */
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);

	switch (state) {
	case pci_channel_io_normal:
		/* non-fatal: device access still works, attempt recovery */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		ioc->pci_error_recovery = 1;
		scsi_block_requests(ioc->shost);
		mpt3sas_base_stop_watchdog(ioc);
		mpt3sas_base_free_resources(ioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		ioc->pci_error_recovery = 1;
		mpt3sas_base_stop_watchdog(ioc);
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* unknown channel state: fall back to requesting a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
12496 
12497 /**
12498  * scsih_pci_slot_reset - Called when PCI slot has been reset.
12499  * @pdev: PCI device struct
12500  *
12501  * Description: This routine is called by the pci error recovery
12502  * code after the PCI slot has been reset, just before we
12503  * should resume normal operations.
12504  */
12505 static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev * pdev)12506 scsih_pci_slot_reset(struct pci_dev *pdev)
12507 {
12508 	struct Scsi_Host *shost;
12509 	struct MPT3SAS_ADAPTER *ioc;
12510 	int rc;
12511 
12512 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12513 		return PCI_ERS_RESULT_DISCONNECT;
12514 
12515 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
12516 
12517 	ioc->pci_error_recovery = 0;
12518 	ioc->pdev = pdev;
12519 	pci_restore_state(pdev);
12520 	rc = mpt3sas_base_map_resources(ioc);
12521 	if (rc)
12522 		return PCI_ERS_RESULT_DISCONNECT;
12523 
12524 	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12525 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12526 
12527 	ioc_warn(ioc, "hard reset: %s\n",
12528 		 (rc == 0) ? "success" : "failed");
12529 
12530 	if (!rc)
12531 		return PCI_ERS_RESULT_RECOVERED;
12532 	else
12533 		return PCI_ERS_RESULT_DISCONNECT;
12534 }
12535 
/**
 * scsih_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that its
 * OK to resume normal operation. Use completion to allow
 * halted scsi ops to resume.
 */
static void
scsih_pci_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	/* Bail out silently if @pdev no longer maps to a shost/ioc pair. */
	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc_info(ioc, "PCI error: resume callback!!\n");

	/* Restart the watchdog that was stopped in the error_detected
	 * callback, then let the blocked SCSI requests flow again.
	 */
	mpt3sas_base_start_watchdog(ioc);
	scsi_unblock_requests(ioc->shost);
}
12558 
/**
 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 *
 * Return: PCI_ERS_RESULT_RECOVERED when the device is still usable,
 * PCI_ERS_RESULT_DISCONNECT when no adapter is found for @pdev.
 */
static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/* This called only if scsih_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
12582 
/*
 * PCI IDs claimed by this driver; the device ids are defined in
 * mpi/mpi2_cnfg.h. Entries are grouped by controller generation.
 */
static const struct pci_device_id mpt3sas_pci_table[] = {
	/* Spitfire ~ 2004 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Fury ~ 3004 and 3008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Invader ~ 3108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Cutlass ~ 3216 and 3224 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Intruder ~ 3316 and 3324 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Aero SI 0x00E1 Configurable Secure
	 * 0x00E2 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Atlas PCIe Switch Management Port */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Sea SI 0x00E5 Configurable Secure
	 * 0x00E6 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * ATTO Branded ExpressSAS H12xx GT
	 */
	{ MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	{0}     /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12730 
/* PCI error recovery callbacks (AER); see scsih_pci_* above. */
static const struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected	= scsih_pci_error_detected,
	.mmio_enabled	= scsih_pci_mmio_enabled,
	.slot_reset	= scsih_pci_slot_reset,
	.resume		= scsih_pci_resume,
};
12737 
/* System suspend/resume power-management hooks. */
static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
12739 
/* PCI driver registration structure for mpt3sas. */
static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
	.driver.pm	= &scsih_pm_ops,
};
12749 
/**
 * scsih_init - main entry point for this driver.
 *
 * Registers every MPI reply callback handler this module uses and sets
 * up the driver's debugfs tree.  The returned indexes are stored in
 * module-global *_cb_idx variables and released again in scsih_exit().
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	/* queuecommand callback handler */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	/* target reset (device removal) callback handler */
	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_tr_complete);

	/* volume target reset callback handler */
	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_volume_tr_complete);

	/* SAS IO unit control callback handler */
	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_sas_control_complete);

	mpt3sas_init_debugfs();
	return 0;
}
12800 
/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases the callback handlers registered in scsih_init(), the raid
 * class templates (honoring the hbas_to_enumerate policy used when
 * attaching them), the SAS transport template and the debugfs tree.
 */
static void
scsih_exit(void)
{

	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt3sas_base_release_callback_handler(tm_cb_idx);
	mpt3sas_base_release_callback_handler(base_cb_idx);
	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
	mpt3sas_base_release_callback_handler(transport_cb_idx);
	mpt3sas_base_release_callback_handler(scsih_cb_idx);
	mpt3sas_base_release_callback_handler(config_cb_idx);
	mpt3sas_base_release_callback_handler(ctl_cb_idx);

	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);

	/* raid transport support */
	if (hbas_to_enumerate != 1)
		raid_class_release(mpt3sas_raid_template);
	if (hbas_to_enumerate != 2)
		raid_class_release(mpt2sas_raid_template);
	sas_release_transport(mpt3sas_transport_template);
	mpt3sas_exit_debugfs();
}
12831 
12832 /**
12833  * _mpt3sas_init - main entry point for this driver.
12834  *
12835  * Return: 0 success, anything else error.
12836  */
12837 static int __init
_mpt3sas_init(void)12838 _mpt3sas_init(void)
12839 {
12840 	int error;
12841 
12842 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12843 					MPT3SAS_DRIVER_VERSION);
12844 
12845 	mpt3sas_transport_template =
12846 	    sas_attach_transport(&mpt3sas_transport_functions);
12847 	if (!mpt3sas_transport_template)
12848 		return -ENODEV;
12849 
12850 	/* No need attach mpt3sas raid functions template
12851 	 * if hbas_to_enumarate value is one.
12852 	 */
12853 	if (hbas_to_enumerate != 1) {
12854 		mpt3sas_raid_template =
12855 				raid_class_attach(&mpt3sas_raid_functions);
12856 		if (!mpt3sas_raid_template) {
12857 			sas_release_transport(mpt3sas_transport_template);
12858 			return -ENODEV;
12859 		}
12860 	}
12861 
12862 	/* No need to attach mpt2sas raid functions template
12863 	 * if hbas_to_enumarate value is two
12864 	 */
12865 	if (hbas_to_enumerate != 2) {
12866 		mpt2sas_raid_template =
12867 				raid_class_attach(&mpt2sas_raid_functions);
12868 		if (!mpt2sas_raid_template) {
12869 			sas_release_transport(mpt3sas_transport_template);
12870 			return -ENODEV;
12871 		}
12872 	}
12873 
12874 	error = scsih_init();
12875 	if (error) {
12876 		scsih_exit();
12877 		return error;
12878 	}
12879 
12880 	mpt3sas_ctl_init(hbas_to_enumerate);
12881 
12882 	error = pci_register_driver(&mpt3sas_driver);
12883 	if (error) {
12884 		mpt3sas_ctl_exit(hbas_to_enumerate);
12885 		scsih_exit();
12886 	}
12887 
12888 	return error;
12889 }
12890 
/**
 * _mpt3sas_exit - exit point for this driver (when it is a module).
 *
 * Unregisters the PCI driver first so no new probes can race with the
 * teardown of the ctl module and the scsih callback handlers.
 */
static void __exit
_mpt3sas_exit(void)
{
	pr_info("mpt3sas version %s unloading\n",
				MPT3SAS_DRIVER_VERSION);

	pci_unregister_driver(&mpt3sas_driver);

	mpt3sas_ctl_exit(hbas_to_enumerate);

	scsih_exit();
}
12907 
/* Module entry and exit points. */
module_init(_mpt3sas_init);
module_exit(_mpt3sas_exit);
12910