// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/idr.h>

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static DEFINE_IDA(mrioc_ida);
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, "Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	"bits for enabling additional logging info (default=0)");
static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
	"Preferred max number of SG entries to be used for a single I/O\n"
	"The actual value will be determined by the driver\n"
	"(Minimum=256, Maximum=2048, default=256)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)

/*
 * SAS Log info code for a NCQ collateral abort after an NCQ error:
 * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
 * See: drivers/message/fusion/lsi/mpi_log_sas.h
 */
#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR	0x31080000

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on the block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}
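
/*
 * Tag mapping sketch (illustrative, not driver code), assuming the
 * blk-mq unique tag layout of hardware queue index in the upper bits
 * and per-queue tag in the lower BLK_MQ_UNIQUE_TAG_BITS (16) bits:
 *
 *	unique_tag = (hw_queue << BLK_MQ_UNIQUE_TAG_BITS) | blk_tag;
 *
 * e.g. hw_queue = 2 and blk_tag = 0x001a give unique_tag = 0x0002001a,
 * which blk_mq_unique_tag_to_hwq()/blk_mq_unique_tag_to_tag() split
 * back into 2 and 0x001a; the host tag reported to the firmware is
 * then 0x001a + 1 = 0x001b, since host_tag 0 is reserved as invalid.
 */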

/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve the associated SCSI command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark that the
 * command is no longer in LLD scope.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: kref pointer of the firmware event
 *
 * Free firmware event memory when there are no references left.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - kref incrementer
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - kref decrementer
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}
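
/*
 * fwevt reference counting at a glance (a sketch derived from the
 * helpers above and the list/worker routines below):
 *
 *	fwevt = mpi3mr_alloc_fwevt(len);	refcount = 1 (kref_init)
 *	mpi3mr_fwevt_add_to_list(mrioc, fwevt);	refcount = 3 (list + work)
 *
 * Removing the event from the list drops one reference, and the
 * worker (or a cancel) drops the remaining two, so the final
 * kref_put() invokes mpi3mr_fwevt_free().
 */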

/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to
 * the list
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * Add the given hdb trigger data event to the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(*event_data);

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue hdb trigger data event\n");
		return;
	}

	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	memcpy(fwevt->event_data, event_data, sz);

	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}

/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}

/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() for the fwevt work if
		 * the controller reset was invoked as part of processing
		 * the same fwevt work, or when the worker thread is
		 * waiting for device add/remove APIs to complete.
		 * Otherwise we will deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread; the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttle and if
	 * the QD is not restored through device info change event
	 * then don't queue further reduction events
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
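
/*
 * Worked example of the reduction above: with tg->fw_qd = 256 and
 * tg->qd_reduction = 5 (tenths of the firmware QD), modified_qd =
 * max(256 * 5 / 10, 8) = 128; the floor of 8 keeps a heavily
 * throttled VD from being starved of tags entirely.
 */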

/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			tgt_priv->wslen = 0;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device(lun) then device specific pending I/O counter
 * is updated in the device structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then target specific pending I/O counter is
 * updated in the target structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}

/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate a target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdevice to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdevice from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
		if (!list_empty(&tgtdev->list)) {
			list_del_init(&tgtdev->list);
			tgtdev->state = MPI3MR_DEV_DELETED;
			mpi3mr_tgtdev_put(tgtdev);
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
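
/*
 * tgtdev list state transitions handled by the two helpers above
 * (a sketch based on the states used in this driver):
 *
 *	add_to_list:	(new device)                 -> MPI3MR_DEV_CREATED
 *	del_from_list:	MPI3MR_DEV_REMOVE_HS_STARTED -> MPI3MR_DEV_DELETED
 *			(or any state when must_delete is set)
 *
 * and the deletion path drops the list's device reference.
 */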

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set the io_divert flag for each device associated
 * with the given throttle group to the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 * device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
	bool device_add)
{
	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
	    (device_add ? "addition" : "removal"));
	ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
	ioc_notice(mrioc, "are matched with attached devices for correctness\n");
}

/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to upper layers and if it
 * is then remove the device from upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		atomic_set(&tgt_priv->block_io, 0);
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		if (tgtdev->starget) {
			if (mrioc->current_event)
				mrioc->current_event->pending_at_sml = 1;
			scsi_remove_target(&tgtdev->starget->dev);
			tgtdev->host_exposed = 0;
			if (mrioc->current_event) {
				mrioc->current_event->pending_at_sml = 0;
				if (mrioc->current_event->discard) {
					mpi3mr_print_device_event_notice(mrioc,
					    false);
					return;
				}
			}
		}
	} else
		mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
	mpi3mr_global_trigger(mrioc,
	    MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED);

	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}

/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to the upper layers
 * and, if it is not already exposed, exposes it by calling
 * scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
		dprint_event_bh(mrioc,
		    "exposed target device with handle(0x%04x), perst_id(%d)\n",
		    tgtdev->dev_handle, perst_id);
		goto out;
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}

/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}
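
/*
 * Examples of the clamping above: q_depth = 0 falls back to
 * MPI3MR_DEFAULT_SDEV_QD, a request larger than shost->can_queue
 * (say 4096 with can_queue = 1024) is limited to 1024, and an
 * untagged device is always pinned to a depth of 1.
 */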

static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev,
	struct queue_limits *lim)
{
	u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? : MPI3MR_DEFAULT_PGSZEXP;

	lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512;
	lim->virt_boundary_mask = (1 << pgsz) - 1;
}
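
/*
 * Worked example for the NVMe limits above: mdts = 1 MiB gives
 * max_hw_sectors = 1048576 / 512 = 2048, and pgsz = 12 (a 4 KiB
 * device page) gives virt_boundary_mask = (1 << 12) - 1 = 0xfff,
 * i.e. no SG element may straddle a 4 KiB boundary.
 */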

static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev,
	struct queue_limits *lim)
{
	if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE &&
	    (tgt_dev->dev_spec.pcie_inf.dev_info &
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
		mpi3mr_configure_nvme_dev(tgt_dev, lim);
}

/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct queue_limits lim;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);

	lim = queue_limits_start_update(sdev->request_queue);
	mpi3mr_configure_tgt_dev(tgtdev, &lim);
	WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim));
}
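
/*
 * The update above follows the block layer's start/commit pattern:
 * queue_limits_start_update() hands back a local copy of the queue
 * limits, the caller edits that copy, and queue_limits_commit_update()
 * validates and applies it, returning non-zero if the new limits are
 * rejected (hence the WARN_ON_ONCE()).
 */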

/**
 * mpi3mr_refresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any devices
 * that went missing during the reset and remove them from the
 * upper layers, and to expose any newly detected device to the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	dprint_reset(mrioc, "refresh target devices: check for removals\n");
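	/*
	 * Pass 1: mark exposed devices whose handles were invalidated
	 * during reset, or which have turned hidden, as removed and
	 * clear their I/O block so queued commands are no longer held.
	 */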
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
		    tgtdev->is_hidden) &&
		    tgtdev->host_exposed && tgtdev->starget &&
		    tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_removed = 1;
			atomic_set(&tgt_priv->block_io, 0);
		}
	}

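	/*
	 * Pass 2: remove vanished devices from the upper layers and
	 * the driver's list, and unexpose devices that turned hidden.
	 */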
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
			dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		}
	}

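	/*
	 * Pass 3: expose newly detected devices and refresh the SCSI
	 * device parameters of targets that are already exposed.
	 */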
	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden) {
			if (!tgtdev->host_exposed)
				mpi3mr_report_tgtdev_to_host(mrioc,
				    tgtdev->perst_id);
			else if (tgtdev->starget)
				starget_for_each_device(tgtdev->starget,
				    (void *)tgtdev, mpi3mr_update_sdev);
		}
	}
}

/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->wslen = 0;
		break;
	}

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
		scsi_tgt_priv_data->wslen = tgtdev->wslen;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;

		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
		    && (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}
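
/*
 * Worked example for the VD throttle limits above: device page0
 * values io_throttle_group_high = 16 and io_throttle_group_low = 8
 * yield tg_high = 16 * 2048 = 32768 and tg_low = 8 * 2048 = 16384;
 * the 2048 multiplier scales the page0 values into the units used
 * by the driver's pending-I/O accounting.
 */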

/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	dprint_event_bh(mrioc,
	    "processing device status change event bottom half for handle(0x%04x), rc(0x%02x)\n",
	    dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev) {
		dprint_event_bh(mrioc,
		    "processing device status change event bottom half,\n"
		    "cannot identify target device for handle(0x%04x), rc(0x%02x)\n",
		    dev_handle, evtdata->reason_code);
		goto out;
	}
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}

	if (delete)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	dprint_event_bh(mrioc,
	    "processing device info change event bottom half for handle(0x%04x), perst_id(%d)\n",
	    dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev) {
		dprint_event_bh(mrioc,
		    "cannot identify target device for device info\n"
		    "change event handle(0x%04x), perst_id(%d)\n",
		    dev_handle, perst_id);
		goto out;
	}
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for enclosure device based on handle, then returns the
 * enclosure object.
 *
 * Return: Enclosure object reference or NULL
 */
struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;

	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
			continue;
		r = enclosure_dev;
		goto out;
	}
out:
	return r;
}

/**
 * mpi3mr_process_trigger_data_event_bh - Process trigger event
 * data
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * This function releases diag buffers or issues a diag fault
 * based on trigger conditions.
 *
 * Return: Nothing
 */
static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct diag_buffer_desc *trace_hdb = event_data->trace_hdb;
	struct diag_buffer_desc *fw_hdb = event_data->fw_hdb;
	unsigned long flags;
	int retval = 0;
	u8 trigger_type = event_data->trigger_type;
	union mpi3mr_trigger_data *trigger_data =
	    &event_data->trigger_specific_data;

	if (event_data->snapdump) {
		if (trace_hdb)
			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
			    trigger_data, 1);
		if (fw_hdb)
			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
			    trigger_data, 1);
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_TRIGGER, 1);
		return;
	}

	if (trace_hdb) {
		retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb);
		if (!retval) {
			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
			    trigger_data, 1);
		}
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		mrioc->trace_release_trigger_active = false;
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
	if (fw_hdb) {
		retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb);
		if (!retval) {
			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
			    trigger_data, 1);
		}
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		mrioc->fw_release_trigger_active = false;
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}

/**
 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
 * @mrioc: Adapter instance reference
 * @encl_pg0: Enclosure page 0.
 * @is_added: Added event or not
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
	char *reason_str = NULL;

	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
		return;

	if (is_added)
		reason_str = "enclosure added";
	else
		reason_str = "enclosure dev status changed";

	ioc_info(mrioc,
	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
	ioc_info(mrioc,
	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
	    le16_to_cpu(encl_pg0->flags),
	    ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}

/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled and add or remove
 * the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	struct mpi3_enclosure_page0 *encl_pg0;
	u16 encl_handle;
	u8 added, present;

	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
	present = ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);

	if (encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    encl_handle);
	if (!enclosure_dev && present) {
		enclosure_dev =
		    kzalloc(sizeof(struct mpi3mr_enclosure_node),
		    GFP_KERNEL);
		if (!enclosure_dev)
			return;
		list_add_tail(&enclosure_dev->list,
		    &mrioc->enclosure_list);
	}
	if (enclosure_dev) {
		if (!present) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		} else
			memcpy(&enclosure_dev->pg0, encl_pg0,
			    sizeof(enclosure_dev->pg0));
	}
}

/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    event_data->io_unit_port,
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
1713
1714 /**
1715 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
1716 * @mrioc: Adapter instance reference
1717 * @fwevt: Firmware event reference
1718 *
1719 * Prints information about the SAS topology change event and
1720 * for "not responding" event code, removes the device from the
1721 * upper layers.
1722 *
1723 * Return: Nothing.
1724 */
1725 static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
1726 struct mpi3mr_fwevt *fwevt)
1727 {
1728 struct mpi3_event_data_sas_topology_change_list *event_data =
1729 (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
1730 int i;
1731 u16 handle;
1732 u8 reason_code;
1733 u64 exp_sas_address = 0, parent_sas_address = 0;
1734 struct mpi3mr_hba_port *hba_port = NULL;
1735 struct mpi3mr_tgt_dev *tgtdev = NULL;
1736 struct mpi3mr_sas_node *sas_expander = NULL;
1737 unsigned long flags;
1738 u8 link_rate, prev_link_rate, parent_phy_number;
1739
1740 mpi3mr_sastopochg_evt_debug(mrioc, event_data);
1741 if (mrioc->sas_transport_enabled) {
1742 hba_port = mpi3mr_get_hba_port_by_id(mrioc,
1743 event_data->io_unit_port);
1744 if (le16_to_cpu(event_data->expander_dev_handle)) {
1745 spin_lock_irqsave(&mrioc->sas_node_lock, flags);
1746 sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
1747 le16_to_cpu(event_data->expander_dev_handle));
1748 if (sas_expander) {
1749 exp_sas_address = sas_expander->sas_address;
1750 hba_port = sas_expander->hba_port;
1751 }
1752 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
1753 parent_sas_address = exp_sas_address;
1754 } else
1755 parent_sas_address = mrioc->sas_hba.sas_address;
1756 }
1757
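	/*
	 * fwevt->discard is re-checked on every iteration so that
	 * event cleanup (for example during a controller reset) can
	 * abort this bottom half while it is still walking the
	 * topology change list.
	 */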
1758 for (i = 0; i < event_data->num_entries; i++) {
1759 if (fwevt->discard)
1760 return;
1761 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
1762 if (!handle)
1763 continue;
1764 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1765 if (!tgtdev)
1766 continue;
1767
1768 reason_code = event_data->phy_entry[i].status &
1769 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1770
1771 switch (reason_code) {
1772 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1773 if (tgtdev->host_exposed)
1774 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1775 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
1776 mpi3mr_tgtdev_put(tgtdev);
1777 break;
1778 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
1779 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1780 case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1781 {
1782 if (!mrioc->sas_transport_enabled || tgtdev->non_stl
1783 || tgtdev->is_hidden)
1784 break;
1785 link_rate = event_data->phy_entry[i].link_rate >> 4;
1786 prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
1787 if (link_rate == prev_link_rate)
1788 break;
1789 if (!parent_sas_address)
1790 break;
1791 parent_phy_number = event_data->start_phy_num + i;
1792 mpi3mr_update_links(mrioc, parent_sas_address, handle,
1793 parent_phy_number, link_rate, hba_port);
1794 break;
1795 }
1796 default:
1797 break;
1798 }
1799 if (tgtdev)
1800 mpi3mr_tgtdev_put(tgtdev);
1801 }
1802
1803 if (mrioc->sas_transport_enabled && (event_data->exp_status ==
1804 MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
1805 if (sas_expander)
1806 mpi3mr_expander_remove(mrioc, exp_sas_address,
1807 hba_port);
1808 }
1809 }
1810
1811 /**
1812 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
1813 * @mrioc: Adapter instance reference
1814 * @event_data: PCIe topology change list event data
1815 *
1816 * Prints information about the PCIe topology change event.
1817 *
1818 * Return: Nothing.
1819 */
1820 static void
1821 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1822 struct mpi3_event_data_pcie_topology_change_list *event_data)
1823 {
1824 int i;
1825 u16 handle;
1826 u16 reason_code;
1827 u8 port_number;
1828 char *status_str = NULL;
1829 u8 link_rate, prev_link_rate;
1830
1831 switch (event_data->switch_status) {
1832 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
1833 status_str = "remove";
1834 break;
1835 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
1836 status_str = "responding";
1837 break;
1838 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
1839 status_str = "remove delay";
1840 break;
1841 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
1842 status_str = "direct attached";
1843 break;
1844 default:
1845 status_str = "unknown status";
1846 break;
1847 }
1848 ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
1849 __func__, status_str);
1850 ioc_info(mrioc,
1851 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
1852 __func__, le16_to_cpu(event_data->switch_dev_handle),
1853 le16_to_cpu(event_data->enclosure_handle),
1854 event_data->start_port_num, event_data->num_entries);
1855 for (i = 0; i < event_data->num_entries; i++) {
1856 handle =
1857 le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1858 if (!handle)
1859 continue;
1860 port_number = event_data->start_port_num + i;
1861 reason_code = event_data->port_entry[i].port_status;
1862 switch (reason_code) {
1863 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1864 status_str = "target remove";
1865 break;
1866 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1867 status_str = "delay target remove";
1868 break;
1869 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1870 status_str = "link status change";
1871 break;
1872 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
1873 status_str = "link status no change";
1874 break;
1875 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
1876 status_str = "target responding";
1877 break;
1878 default:
1879 status_str = "unknown";
1880 break;
1881 }
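		/*
		 * Unlike the SAS variant above, the PCIe rate codes
		 * live in separate current/previous port info bytes;
		 * the same MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK field is
		 * extracted from each.
		 */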
1882 link_rate = event_data->port_entry[i].current_port_info &
1883 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1884 prev_link_rate = event_data->port_entry[i].previous_port_info &
1885 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1886 ioc_info(mrioc,
1887 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1888 __func__, port_number, handle, status_str, link_rate,
1889 prev_link_rate);
1890 }
1891 }
1892
1893 /**
1894 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
1895 * @mrioc: Adapter instance reference
1896 * @fwevt: Firmware event reference
1897 *
1898 * Prints information about the PCIe topology change event and
1899 * for "not responding" event code, removes the device from the
1900 * upper layers.
1901 *
1902 * Return: Nothing.
1903 */
1904 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
1905 struct mpi3mr_fwevt *fwevt)
1906 {
1907 struct mpi3_event_data_pcie_topology_change_list *event_data =
1908 (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
1909 int i;
1910 u16 handle;
1911 u8 reason_code;
1912 struct mpi3mr_tgt_dev *tgtdev = NULL;
1913
1914 mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
1915
1916 for (i = 0; i < event_data->num_entries; i++) {
1917 if (fwevt->discard)
1918 return;
1919 handle =
1920 le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1921 if (!handle)
1922 continue;
1923 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1924 if (!tgtdev)
1925 continue;
1926
1927 reason_code = event_data->port_entry[i].port_status;
1928
1929 switch (reason_code) {
1930 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1931 if (tgtdev->host_exposed)
1932 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1933 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
1934 mpi3mr_tgtdev_put(tgtdev);
1935 break;
1936 default:
1937 break;
1938 }
1939 if (tgtdev)
1940 mpi3mr_tgtdev_put(tgtdev);
1941 }
1942 }
1943
1944 /**
1945 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
1946 * @mrioc: Adapter instance reference
1947 * @fwevt: Firmware event reference
1948 *
1949 * Extracts the event data and calls application interfacing
1950 * function to process the event further.
1951 *
1952 * Return: Nothing.
1953 */
1954 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
1955 struct mpi3mr_fwevt *fwevt)
1956 {
1957 mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
1958 fwevt->event_data_size);
1959 }
1960
1961 /**
1962 * mpi3mr_update_sdev_qd - Update SCSI device queue depth
1963 * @sdev: SCSI device reference
1964 * @data: Queue depth reference
1965 *
1966 * This is an iterator function called for each SCSI device in a
1967 * target to update the QD of each SCSI device.
1968 *
1969 * Return: Nothing.
1970 */
1971 static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
1972 {
1973 u16 *q_depth = (u16 *)data;
1974
1975 scsi_change_queue_depth(sdev, (int)*q_depth);
1976 sdev->max_queue_depth = sdev->queue_depth;
1977 }
1978
1979 /**
1980 * mpi3mr_set_qd_for_all_vd_in_tg - Set QD for TG VDs
1981 * @mrioc: Adapter instance reference
1982 * @tg: Throttle group information pointer
1983 *
1984 * Accessor to reduce QD for each device associated with the
1985 * given throttle group.
1986 *
1987 * Return: None.
1988 */
1989 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
1990 struct mpi3mr_throttle_group_info *tg)
1991 {
1992 unsigned long flags;
1993 struct mpi3mr_tgt_dev *tgtdev;
1994 struct mpi3mr_stgt_priv_data *tgt_priv;
1995
1997 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
1998 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
1999 if (tgtdev->starget && tgtdev->starget->hostdata) {
2000 tgt_priv = tgtdev->starget->hostdata;
2001 if (tgt_priv->throttle_group == tg) {
2002 dprint_event_bh(mrioc,
2003 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
2004 tgt_priv->perst_id, tgtdev->q_depth,
2005 tg->modified_qd);
2006 starget_for_each_device(tgtdev->starget,
2007 (void *)&tg->modified_qd,
2008 mpi3mr_update_sdev_qd);
2009 }
2010 }
2011 }
2012 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2013 }
2014
2015 /**
2016 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
2017 * @mrioc: Adapter instance reference
2018 * @fwevt: Firmware event reference
2019 *
2020 * Identifies the firmware event and calls the corresponding bottom
2021 * half handler and sends an event acknowledgment if required.
2022 *
2023 * Return: Nothing.
2024 */
2025 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
2026 struct mpi3mr_fwevt *fwevt)
2027 {
2028 struct mpi3_device_page0 *dev_pg0 = NULL;
2029 u16 perst_id, handle, dev_info;
2030 struct mpi3_device0_sas_sata_format *sasinf = NULL;
2031 unsigned int timeout;
2032
2033 mpi3mr_fwevt_del_from_list(mrioc, fwevt);
2034 mrioc->current_event = fwevt;
2035
2036 if (mrioc->stop_drv_processing) {
2037 		dprint_event_bh(mrioc,
2038 		    "ignoring event(0x%02x) in the bottom half handler due to stop_drv_processing\n", fwevt->event_id);
2039 goto out;
2040 }
2041
2042 if (mrioc->unrecoverable) {
2043 dprint_event_bh(mrioc,
2044 "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
2045 fwevt->event_id);
2046 goto out;
2047 }
2048
2049 if (!fwevt->process_evt)
2050 goto evt_ack;
2051
2052 dprint_event_bh(mrioc, "processing event(0x%02x) in the bottom half handler\n",
2053 fwevt->event_id);
2054
2055 switch (fwevt->event_id) {
2056 case MPI3_EVENT_DEVICE_ADDED:
2057 {
2058 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
2059 perst_id = le16_to_cpu(dev_pg0->persistent_id);
2060 handle = le16_to_cpu(dev_pg0->dev_handle);
2061 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
2062 mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
2063 else if (mrioc->sas_transport_enabled &&
2064 (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
2065 sasinf = &dev_pg0->device_specific.sas_sata_format;
2066 dev_info = le16_to_cpu(sasinf->device_info);
2067 if (!mrioc->sas_hba.num_phys)
2068 mpi3mr_sas_host_add(mrioc);
2069 else
2070 mpi3mr_sas_host_refresh(mrioc);
2071
2072 if (mpi3mr_is_expander_device(dev_info))
2073 mpi3mr_expander_add(mrioc, handle);
2074 }
2075 break;
2076 }
2077 case MPI3_EVENT_DEVICE_INFO_CHANGED:
2078 {
2079 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
2080 perst_id = le16_to_cpu(dev_pg0->persistent_id);
2081 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
2082 mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
2083 break;
2084 }
2085 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
2086 {
2087 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
2088 break;
2089 }
2090 case MPI3_EVENT_ENCL_DEVICE_ADDED:
2091 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
2092 {
2093 mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
2094 break;
2095 }
2096
2097 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
2098 {
2099 mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
2100 break;
2101 }
2102 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
2103 {
2104 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
2105 break;
2106 }
2107 case MPI3_EVENT_LOG_DATA:
2108 {
2109 mpi3mr_logdata_evt_bh(mrioc, fwevt);
2110 break;
2111 }
2112 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
2113 {
2114 struct mpi3mr_throttle_group_info *tg;
2115
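		/*
		 * For this driver-generated event, event_data does not
		 * hold the throttle group itself but a pointer to it,
		 * hence the extra level of indirection below.
		 */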
2116 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
2117 dprint_event_bh(mrioc,
2118 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
2119 tg->id, tg->need_qd_reduction);
2120 if (tg->need_qd_reduction) {
2121 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
2122 tg->need_qd_reduction = 0;
2123 }
2124 break;
2125 }
2126 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
2127 {
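		/*
		 * The wait below polls every 500ms and gives up after
		 * MPI3MR_RESET_TIMEOUT * 2 iterations, i.e. roughly
		 * MPI3MR_RESET_TIMEOUT seconds (assuming that macro is
		 * defined in seconds) before marking the controller
		 * unrecoverable.
		 */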
2128 timeout = MPI3MR_RESET_TIMEOUT * 2;
2129 while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) &&
2130 !mrioc->unrecoverable && !mrioc->pci_err_recovery) {
2131 msleep(500);
2132 if (!timeout--) {
2133 mrioc->unrecoverable = 1;
2134 break;
2135 }
2136 }
2137
2138 if (mrioc->unrecoverable || mrioc->pci_err_recovery)
2139 break;
2140
2141 dprint_event_bh(mrioc,
2142 "scan for non responding and newly added devices after soft reset started\n");
2143 if (mrioc->sas_transport_enabled) {
2144 mpi3mr_refresh_sas_ports(mrioc);
2145 mpi3mr_refresh_expanders(mrioc);
2146 }
2147 mpi3mr_refresh_tgtdevs(mrioc);
2148 ioc_info(mrioc,
2149 "scan for non responding and newly added devices after soft reset completed\n");
2150 break;
2151 }
2152 case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER:
2153 {
2154 mpi3mr_process_trigger_data_event_bh(mrioc,
2155 (struct trigger_event_data *)fwevt->event_data);
2156 break;
2157 }
2158 default:
2159 break;
2160 }
2161
2162 evt_ack:
2163 if (fwevt->send_ack)
2164 mpi3mr_process_event_ack(mrioc, fwevt->event_id,
2165 fwevt->evt_ctx);
2166 out:
2167 /* Put fwevt reference count to neutralize kref_init increment */
2168 mpi3mr_fwevt_put(fwevt);
2169 mrioc->current_event = NULL;
2170 }
2171
2172 /**
2173 * mpi3mr_fwevt_worker - Firmware event worker
2174 * @work: Work struct containing firmware event
2175 *
2176 * Extracts the firmware event and calls mpi3mr_fwevt_bh.
2177 *
2178 * Return: Nothing.
2179 */
2180 static void mpi3mr_fwevt_worker(struct work_struct *work)
2181 {
2182 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
2183 work);
2184 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
2185 /*
2186 * Put fwevt reference count after
2187 * dequeuing it from worker queue
2188 */
2189 mpi3mr_fwevt_put(fwevt);
2190 }
2191
2192 /**
2193 * mpi3mr_create_tgtdev - Create and add a target device
2194 * @mrioc: Adapter instance reference
2195 * @dev_pg0: Device Page 0 data
2196 *
2197 * If the device specified by the device page 0 data is not
2198 * present in the driver's internal list, allocates memory for the
2199 * device, populates the data and adds it to the list; otherwise
2200 * updates the existing device data. The lookup key is the persistent ID.
2201 *
2202 * Return: 0 on success, -ENOMEM on memory allocation failure
2203 */
2204 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
2205 struct mpi3_device_page0 *dev_pg0)
2206 {
2207 int retval = 0;
2208 struct mpi3mr_tgt_dev *tgtdev = NULL;
2209 u16 perst_id = 0;
2210 unsigned long flags;
2211
2212 perst_id = le16_to_cpu(dev_pg0->persistent_id);
2213 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
2214 return retval;
2215
2216 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2217 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
2218 if (tgtdev)
2219 tgtdev->state = MPI3MR_DEV_CREATED;
2220 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2221
2222 if (tgtdev) {
2223 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2224 mpi3mr_tgtdev_put(tgtdev);
2225 } else {
2226 tgtdev = mpi3mr_alloc_tgtdev();
2227 if (!tgtdev)
2228 return -ENOMEM;
2229 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2230 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
2231 }
2232
2233 return retval;
2234 }
2235
2236 /**
2237 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
2238 * @mrioc: Adapter instance reference
2239 *
2240 * Flush pending commands in the delayed lists due to a
2241 * controller reset or driver removal as a cleanup.
2242 *
2243 * Return: Nothing
2244 */
2245 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
2246 {
2247 struct delayed_dev_rmhs_node *_rmhs_node;
2248 struct delayed_evt_ack_node *_evtack_node;
2249
2250 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
2251 while (!list_empty(&mrioc->delayed_rmhs_list)) {
2252 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
2253 struct delayed_dev_rmhs_node, list);
2254 list_del(&_rmhs_node->list);
2255 kfree(_rmhs_node);
2256 }
2257 dprint_reset(mrioc, "flushing delayed event ack commands\n");
2258 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2259 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
2260 struct delayed_evt_ack_node, list);
2261 list_del(&_evtack_node->list);
2262 kfree(_evtack_node);
2263 }
2264 }
2265
2266 /**
2267 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
2268 * @mrioc: Adapter instance reference
2269 * @drv_cmd: Internal command tracker
2270 *
2271 * Issues a target reset TM to the firmware from the device
2272 * removal TM pend list or retries the removal handshake sequence
2273 * based on the IOU control request IOC status.
2274 *
2275 * Return: Nothing
2276 */
2277 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
2278 struct mpi3mr_drv_cmd *drv_cmd)
2279 {
2280 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2281 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2282
2283 if (drv_cmd->state & MPI3MR_CMD_RESET)
2284 goto clear_drv_cmd;
2285
2286 ioc_info(mrioc,
2287 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
2288 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
2289 drv_cmd->ioc_loginfo);
2290 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2291 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
2292 drv_cmd->retry_count++;
2293 ioc_info(mrioc,
2294 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
2295 __func__, drv_cmd->dev_handle,
2296 drv_cmd->retry_count);
2297 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
2298 drv_cmd, drv_cmd->iou_rc);
2299 return;
2300 }
2301 ioc_err(mrioc,
2302 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
2303 __func__, drv_cmd->dev_handle);
2304 } else {
2305 ioc_info(mrioc,
2306 "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
2307 __func__, drv_cmd->dev_handle);
2308 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
2309 }
2310
2311 if (!list_empty(&mrioc->delayed_rmhs_list)) {
2312 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
2313 struct delayed_dev_rmhs_node, list);
2314 drv_cmd->dev_handle = delayed_dev_rmhs->handle;
2315 drv_cmd->retry_count = 0;
2316 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
2317 ioc_info(mrioc,
2318 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
2319 __func__, drv_cmd->dev_handle);
2320 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
2321 drv_cmd->iou_rc);
2322 list_del(&delayed_dev_rmhs->list);
2323 kfree(delayed_dev_rmhs);
2324 return;
2325 }
2326
2327 clear_drv_cmd:
2328 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2329 drv_cmd->callback = NULL;
2330 drv_cmd->retry_count = 0;
2331 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2332 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2333 }
2334
2335 /**
2336 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
2337 * @mrioc: Adapter instance reference
2338 * @drv_cmd: Internal command tracker
2339 *
2340 * Issues a target reset TM to the firmware from the device
2341 * removal TM pend list or issues an IO unit control request as
2342 * part of the device removal or hidden acknowledgment handshake.
2343 *
2344 * Return: Nothing
2345 */
2346 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
2347 struct mpi3mr_drv_cmd *drv_cmd)
2348 {
2349 struct mpi3_iounit_control_request iou_ctrl;
2350 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2351 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
2352 int retval;
2353
2354 if (drv_cmd->state & MPI3MR_CMD_RESET)
2355 goto clear_drv_cmd;
2356
2357 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
2358 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
2359
2360 if (tm_reply)
2361 pr_info(IOCNAME
2362 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
2363 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
2364 drv_cmd->ioc_loginfo,
2365 le32_to_cpu(tm_reply->termination_count));
2366
2367 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
2368 mrioc->name, drv_cmd->dev_handle, cmd_idx);
2369
2370 memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2371
2372 drv_cmd->state = MPI3MR_CMD_PENDING;
2373 drv_cmd->is_waiting = 0;
2374 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
2375 iou_ctrl.operation = drv_cmd->iou_rc;
2376 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
2377 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
2378 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2379
2380 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
2381 1);
2382 if (retval) {
2383 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
2384 mrioc->name);
2385 goto clear_drv_cmd;
2386 }
2387
2388 return;
2389 clear_drv_cmd:
2390 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2391 drv_cmd->callback = NULL;
2392 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2393 drv_cmd->retry_count = 0;
2394 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2395 }
2396
2397 /**
2398 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
2399 * @mrioc: Adapter instance reference
2400 * @handle: Device handle
2401 * @cmdparam: Internal command tracker
2402 * @iou_rc: IO unit reason code
2403 *
2404 * Issues a target reset TM to the firmware or adds it to a pend
2405 * list as part of the device removal or hidden acknowledgment
2406 * handshake.
2407 *
2408 * Return: Nothing
2409 */
2410 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
2411 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
2412 {
2413 struct mpi3_scsi_task_mgmt_request tm_req;
2414 int retval = 0;
2415 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
2416 u8 retrycount = 5;
2417 struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2418 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2419 struct mpi3mr_tgt_dev *tgtdev = NULL;
2420 unsigned long flags;
2421
2422 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2423 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2424 if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE))
2425 tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
2426 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2427
2428 if (drv_cmd)
2429 goto issue_cmd;
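	/*
	 * Lock-free slot allocation: find_first_zero_bit() only hints
	 * at a free slot; test_and_set_bit() is what actually claims
	 * it, so a small number of retries covers the case where
	 * another context wins the race for the same bit.
	 */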
2430 do {
2431 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
2432 MPI3MR_NUM_DEVRMCMD);
2433 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
2434 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
2435 break;
2436 cmd_idx = MPI3MR_NUM_DEVRMCMD;
2437 }
2438 } while (retrycount--);
2439
2440 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
2441 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
2442 GFP_ATOMIC);
2443 if (!delayed_dev_rmhs)
2444 return;
2445 INIT_LIST_HEAD(&delayed_dev_rmhs->list);
2446 delayed_dev_rmhs->handle = handle;
2447 delayed_dev_rmhs->iou_rc = iou_rc;
2448 list_add_tail(&delayed_dev_rmhs->list,
2449 &mrioc->delayed_rmhs_list);
2450 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
2451 __func__, handle);
2452 return;
2453 }
2454 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
2455
2456 issue_cmd:
2457 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2458 ioc_info(mrioc,
2459 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
2460 __func__, handle, cmd_idx);
2461
2462 memset(&tm_req, 0, sizeof(tm_req));
2463 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2464 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
2465 goto out;
2466 }
2467 drv_cmd->state = MPI3MR_CMD_PENDING;
2468 drv_cmd->is_waiting = 0;
2469 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
2470 drv_cmd->dev_handle = handle;
2471 drv_cmd->iou_rc = iou_rc;
2472 tm_req.dev_handle = cpu_to_le16(handle);
2473 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2474 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
2475 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
2476 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
2477
2478 set_bit(handle, mrioc->removepend_bitmap);
2479 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
2480 if (retval) {
2481 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
2482 __func__);
2483 goto out_failed;
2484 }
2485 out:
2486 return;
2487 out_failed:
2488 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2489 drv_cmd->callback = NULL;
2490 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2491 drv_cmd->retry_count = 0;
2492 clear_bit(cmd_idx, mrioc->devrem_bitmap);
2493 }
2494
2495 /**
2496 * mpi3mr_complete_evt_ack - event ack request completion
2497 * @mrioc: Adapter instance reference
2498 * @drv_cmd: Internal command tracker
2499 *
2500 * This is the completion handler for a non-blocking event
2501 * acknowledgment sent to the firmware; it issues any pending
2502 * event acknowledgment request.
2503 *
2504 * Return: Nothing
2505 */
2506 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
2507 struct mpi3mr_drv_cmd *drv_cmd)
2508 {
2509 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2510 struct delayed_evt_ack_node *delayed_evtack = NULL;
2511
2512 if (drv_cmd->state & MPI3MR_CMD_RESET)
2513 goto clear_drv_cmd;
2514
2515 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2516 dprint_event_th(mrioc,
2517 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
2518 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2519 drv_cmd->ioc_loginfo);
2520 }
2521
2522 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2523 delayed_evtack =
2524 list_entry(mrioc->delayed_evtack_cmds_list.next,
2525 struct delayed_evt_ack_node, list);
2526 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
2527 delayed_evtack->event_ctx);
2528 list_del(&delayed_evtack->list);
2529 kfree(delayed_evtack);
2530 return;
2531 }
2532 clear_drv_cmd:
2533 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2534 drv_cmd->callback = NULL;
2535 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2536 }
2537
2538 /**
2539 * mpi3mr_send_event_ack - Issue event acknowledgment request
2540 * @mrioc: Adapter instance reference
2541 * @event: MPI3 event id
2542 * @cmdparam: Internal command tracker
2543 * @event_ctx: event context
2544 *
2545 * Issues an event acknowledgment request to the firmware if there
2546 * is a free command to send the event ack, else adds it to a pend
2547 * list so that it will be processed on completion of a prior
2548 * event acknowledgment.
2549 *
2550 * Return: Nothing
2551 */
2552 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
2553 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
2554 {
2555 struct mpi3_event_ack_request evtack_req;
2556 int retval = 0;
2557 u8 retrycount = 5;
2558 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2559 struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2560 struct delayed_evt_ack_node *delayed_evtack = NULL;
2561
2562 if (drv_cmd) {
2563 dprint_event_th(mrioc,
2564 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2565 event, event_ctx);
2566 goto issue_cmd;
2567 }
2568 dprint_event_th(mrioc,
2569 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2570 event, event_ctx);
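	/*
	 * Same lock-free bitmap allocation pattern as the device
	 * removal commands above: find a candidate bit, then claim it
	 * atomically with test_and_set_bit(), retrying a few times on
	 * races.
	 */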
2571 do {
2572 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
2573 MPI3MR_NUM_EVTACKCMD);
2574 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
2575 if (!test_and_set_bit(cmd_idx,
2576 mrioc->evtack_cmds_bitmap))
2577 break;
2578 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2579 }
2580 } while (retrycount--);
2581
2582 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
2583 delayed_evtack = kzalloc(sizeof(*delayed_evtack),
2584 GFP_ATOMIC);
2585 if (!delayed_evtack)
2586 return;
2587 INIT_LIST_HEAD(&delayed_evtack->list);
2588 delayed_evtack->event = event;
2589 delayed_evtack->event_ctx = event_ctx;
2590 list_add_tail(&delayed_evtack->list,
2591 &mrioc->delayed_evtack_cmds_list);
2592 dprint_event_th(mrioc,
2593 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
2594 event, event_ctx);
2595 return;
2596 }
2597 drv_cmd = &mrioc->evtack_cmds[cmd_idx];
2598
2599 issue_cmd:
2600 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2601
2602 memset(&evtack_req, 0, sizeof(evtack_req));
2603 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2604 dprint_event_th(mrioc,
2605 "sending event ack failed due to command in use\n");
2606 goto out;
2607 }
2608 drv_cmd->state = MPI3MR_CMD_PENDING;
2609 drv_cmd->is_waiting = 0;
2610 drv_cmd->callback = mpi3mr_complete_evt_ack;
2611 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
2612 evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
2613 evtack_req.event = event;
2614 evtack_req.event_context = cpu_to_le32(event_ctx);
2615 retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
2616 sizeof(evtack_req), 1);
2617 if (retval) {
2618 dprint_event_th(mrioc,
2619 "posting event ack request is failed\n");
2620 goto out_failed;
2621 }
2622
2623 dprint_event_th(mrioc,
2624 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
2625 event, event_ctx);
2626 out:
2627 return;
2628 out_failed:
2629 drv_cmd->state = MPI3MR_CMD_NOTUSED;
2630 drv_cmd->callback = NULL;
2631 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2632 }
2633
2634 /**
2635 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
2636 * @mrioc: Adapter instance reference
2637 * @event_reply: event data
2638 *
2639 * Checks the reason code and based on that either blocks I/O
2640 * to the device, unblocks I/O to the device, or starts the device
2641 * removal handshake with the firmware with reason as remove for
2642 * PCIe devices.
2643 *
2644 * Return: Nothing
2645 */
2646 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
2647 struct mpi3_event_notification_reply *event_reply)
2648 {
2649 struct mpi3_event_data_pcie_topology_change_list *topo_evt =
2650 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
2651 int i;
2652 u16 handle;
2653 u8 reason_code;
2654 struct mpi3mr_tgt_dev *tgtdev = NULL;
2655 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2656
2657 for (i = 0; i < topo_evt->num_entries; i++) {
2658 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
2659 if (!handle)
2660 continue;
2661 reason_code = topo_evt->port_entry[i].port_status;
2662 scsi_tgt_priv_data = NULL;
2663 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2664 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2665 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2666 tgtdev->starget->hostdata;
2667 switch (reason_code) {
2668 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
2669 if (scsi_tgt_priv_data) {
2670 scsi_tgt_priv_data->dev_removed = 1;
2671 scsi_tgt_priv_data->dev_removedelay = 0;
2672 atomic_set(&scsi_tgt_priv_data->block_io, 0);
2673 }
2674 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2675 MPI3_CTRL_OP_REMOVE_DEVICE);
2676 break;
2677 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
2678 if (scsi_tgt_priv_data) {
2679 scsi_tgt_priv_data->dev_removedelay = 1;
2680 atomic_inc(&scsi_tgt_priv_data->block_io);
2681 }
2682 break;
2683 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
2684 if (scsi_tgt_priv_data &&
2685 scsi_tgt_priv_data->dev_removedelay) {
2686 scsi_tgt_priv_data->dev_removedelay = 0;
2687 atomic_dec_if_positive
2688 (&scsi_tgt_priv_data->block_io);
2689 }
2690 break;
2691 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
2692 default:
2693 break;
2694 }
2695 if (tgtdev)
2696 mpi3mr_tgtdev_put(tgtdev);
2697 }
2698 }
2699
2700 /**
2701 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
2702 * @mrioc: Adapter instance reference
2703 * @event_reply: event data
2704 *
2705 * Checks the reason code and based on that either blocks I/O
2706 * to the device, unblocks I/O to the device, or starts the device
2707 * removal handshake with the firmware with reason as remove for
2708 * SAS/SATA devices.
2709 *
2710 * Return: Nothing
2711 */
2712 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
2713 struct mpi3_event_notification_reply *event_reply)
2714 {
2715 struct mpi3_event_data_sas_topology_change_list *topo_evt =
2716 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
2717 int i;
2718 u16 handle;
2719 u8 reason_code;
2720 struct mpi3mr_tgt_dev *tgtdev = NULL;
2721 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2722
2723 for (i = 0; i < topo_evt->num_entries; i++) {
2724 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
2725 if (!handle)
2726 continue;
2727 reason_code = topo_evt->phy_entry[i].status &
2728 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
2729 scsi_tgt_priv_data = NULL;
2730 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2731 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2732 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2733 tgtdev->starget->hostdata;
2734 switch (reason_code) {
2735 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
2736 if (scsi_tgt_priv_data) {
2737 scsi_tgt_priv_data->dev_removed = 1;
2738 scsi_tgt_priv_data->dev_removedelay = 0;
2739 atomic_set(&scsi_tgt_priv_data->block_io, 0);
2740 }
2741 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2742 MPI3_CTRL_OP_REMOVE_DEVICE);
2743 break;
2744 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
2745 if (scsi_tgt_priv_data) {
2746 scsi_tgt_priv_data->dev_removedelay = 1;
2747 atomic_inc(&scsi_tgt_priv_data->block_io);
2748 }
2749 break;
2750 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
2751 if (scsi_tgt_priv_data &&
2752 scsi_tgt_priv_data->dev_removedelay) {
2753 scsi_tgt_priv_data->dev_removedelay = 0;
2754 atomic_dec_if_positive
2755 (&scsi_tgt_priv_data->block_io);
2756 }
2757 break;
2758 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
2759 default:
2760 break;
2761 }
2762 if (tgtdev)
2763 mpi3mr_tgtdev_put(tgtdev);
2764 }
2765 }
2766
2767 /**
2768 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
2769 * @mrioc: Adapter instance reference
2770 * @event_reply: event data
2771 *
2772 * Checks the reason code and based on that either blocks I/O
2773 * to the device, unblocks I/O to the device, or starts the device
2774 * removal handshake with the firmware with reason as remove/hide
2775 * acknowledgment.
2776 *
2777 * Return: Nothing
2778 */
2779 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
2780 struct mpi3_event_notification_reply *event_reply)
2781 {
2782 u16 dev_handle = 0;
2783 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
2784 struct mpi3mr_tgt_dev *tgtdev = NULL;
2785 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2786 struct mpi3_event_data_device_status_change *evtdata =
2787 (struct mpi3_event_data_device_status_change *)event_reply->event_data;
2788
2789 if (mrioc->stop_drv_processing)
2790 goto out;
2791
2792 dev_handle = le16_to_cpu(evtdata->dev_handle);
2793 dprint_event_th(mrioc,
2794 "device status change event top half with rc(0x%02x) for handle(0x%04x)\n",
2795 evtdata->reason_code, dev_handle);
2796
2797 switch (evtdata->reason_code) {
2798 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
2799 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
2800 block = 1;
2801 break;
2802 case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
2803 delete = 1;
2804 hide = 1;
2805 break;
2806 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
2807 delete = 1;
2808 remove = 1;
2809 break;
2810 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
2811 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
2812 ublock = 1;
2813 break;
2814 default:
2815 break;
2816 }
2817
2818 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2819 if (!tgtdev) {
2820 dprint_event_th(mrioc,
2821 "processing device status change event could not identify device for handle(0x%04x)\n",
2822 dev_handle);
2823 goto out;
2824 }
2825 if (hide)
2826 tgtdev->is_hidden = hide;
2827 if (tgtdev->starget && tgtdev->starget->hostdata) {
2828 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2829 tgtdev->starget->hostdata;
2830 if (block)
2831 atomic_inc(&scsi_tgt_priv_data->block_io);
2832 if (delete)
2833 scsi_tgt_priv_data->dev_removed = 1;
2834 if (ublock)
2835 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
2836 }
2837 if (remove)
2838 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2839 MPI3_CTRL_OP_REMOVE_DEVICE);
2840 if (hide)
2841 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2842 MPI3_CTRL_OP_HIDDEN_ACK);
2843
2844 out:
2845 if (tgtdev)
2846 mpi3mr_tgtdev_put(tgtdev);
2847 }
2848
2849 /**
2850 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
2851 * @mrioc: Adapter instance reference
2852 * @event_reply: event data
2853 *
2854 * Blocks and unblocks host level I/O based on the reason code.
2855 *
2856 * Return: Nothing
2857 */
2858 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
2859 struct mpi3_event_notification_reply *event_reply)
2860 {
2861 struct mpi3_event_data_prepare_for_reset *evtdata =
2862 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
2863
2864 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
2865 dprint_event_th(mrioc,
2866 "prepare for reset event top half with rc=start\n");
2867 if (mrioc->prepare_for_reset)
2868 return;
2869 mrioc->prepare_for_reset = 1;
2870 mrioc->prepare_for_reset_timeout_counter = 0;
2871 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
2872 dprint_event_th(mrioc,
2873 "prepare for reset top half with rc=abort\n");
2874 mrioc->prepare_for_reset = 0;
2875 mrioc->prepare_for_reset_timeout_counter = 0;
2876 }
2877 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2878 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2879 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
2880 le32_to_cpu(event_reply->event_context));
2881 }
2882
2883 /**
2884 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
2885 * @mrioc: Adapter instance reference
2886 * @event_reply: event data
2887 *
2888 * Identifies the new shutdown timeout value and updates it.
2889 *
2890 * Return: Nothing
2891 */
2892 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
2893 struct mpi3_event_notification_reply *event_reply)
2894 {
2895 struct mpi3_event_data_energy_pack_change *evtdata =
2896 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
2897 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
2898
2899 if (shutdown_timeout <= 0) {
2900 dprint_event_th(mrioc,
2901 "%s :Invalid Shutdown Timeout received = %d\n",
2902 __func__, shutdown_timeout);
2903 return;
2904 }
2905
2906 dprint_event_th(mrioc,
2907 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
2908 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
2909 mrioc->facts.shutdown_timeout = shutdown_timeout;
2910 }
2911
2912 /**
2913 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
2914 * @mrioc: Adapter instance reference
2915 * @event_reply: event data
2916 *
2917 * Displays cable management event details.
2918 *
2919 * Return: Nothing
2920 */
2921 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
2922 struct mpi3_event_notification_reply *event_reply)
2923 {
2924 struct mpi3_event_data_cable_management *evtdata =
2925 (struct mpi3_event_data_cable_management *)event_reply->event_data;
2926
2927 switch (evtdata->status) {
2928 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
2929 {
2930 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
2931 "Devices connected to this cable are not detected.\n"
2932 "This cable requires %d mW of power.\n",
2933 evtdata->receptacle_id,
2934 le32_to_cpu(evtdata->active_cable_power_requirement));
2935 break;
2936 }
2937 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
2938 {
2939 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
2940 evtdata->receptacle_id);
2941 break;
2942 }
2943 default:
2944 break;
2945 }
2946 }
2947
2948 /**
2949 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
2950 * @mrioc: Adapter instance reference
2951 *
2952 * Adds a driver specific event to make sure that the driver won't process
2953 * the events until all the devices are refreshed during soft reset.
2954 *
2955 * Return: Nothing
2956 */
2957 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
2958 {
2959 struct mpi3mr_fwevt *fwevt = NULL;
2960
2961 fwevt = mpi3mr_alloc_fwevt(0);
2962 if (!fwevt) {
2963 dprint_event_th(mrioc,
2964 "failed to schedule bottom half handler for event(0x%02x)\n",
2965 MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
2966 return;
2967 }
2968 fwevt->mrioc = mrioc;
2969 fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
2970 fwevt->send_ack = 0;
2971 fwevt->process_evt = 1;
2972 fwevt->evt_ctx = 0;
2973 fwevt->event_data_size = 0;
2974 mpi3mr_fwevt_add_to_list(mrioc, fwevt);
2975 }
2976
2977 /**
2978 * mpi3mr_os_handle_events - Firmware event handler
2979 * @mrioc: Adapter instance reference
2980 * @event_reply: event data
2981 *
2982 * Identifies whether the event has to be handled and acknowledged,
2983 * and either processes the event in the top-half and/or schedules a
2984 * bottom-half through mpi3mr_fwevt_worker().
2985 *
2986 * Return: Nothing
2987 */
2988 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
2989 struct mpi3_event_notification_reply *event_reply)
2990 {
2991 u16 evt_type, sz;
2992 struct mpi3mr_fwevt *fwevt = NULL;
2993 bool ack_req = 0, process_evt_bh = 0;
2994
2995 if (mrioc->stop_drv_processing)
2996 return;
2997
2998 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2999 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
3000 ack_req = 1;
3001
3002 evt_type = event_reply->event;
3003 mpi3mr_event_trigger(mrioc, event_reply->event);
3004
3005 switch (evt_type) {
3006 case MPI3_EVENT_DEVICE_ADDED:
3007 {
3008 struct mpi3_device_page0 *dev_pg0 =
3009 (struct mpi3_device_page0 *)event_reply->event_data;
3010 if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
3011 dprint_event_th(mrioc,
3012 "failed to process device added event for handle(0x%04x),\n"
3013 "perst_id(%d) in the event top half handler\n",
3014 le16_to_cpu(dev_pg0->dev_handle),
3015 le16_to_cpu(dev_pg0->persistent_id));
3016 else
3017 process_evt_bh = 1;
3018 break;
3019 }
3020 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
3021 {
3022 process_evt_bh = 1;
3023 mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
3024 break;
3025 }
3026 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
3027 {
3028 process_evt_bh = 1;
3029 mpi3mr_sastopochg_evt_th(mrioc, event_reply);
3030 break;
3031 }
3032 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
3033 {
3034 process_evt_bh = 1;
3035 mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
3036 break;
3037 }
3038 case MPI3_EVENT_PREPARE_FOR_RESET:
3039 {
3040 mpi3mr_preparereset_evt_th(mrioc, event_reply);
3041 ack_req = 0;
3042 break;
3043 }
3044 case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
3045 {
3046 mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply);
3047 break;
3048 }
3049 case MPI3_EVENT_DEVICE_INFO_CHANGED:
3050 case MPI3_EVENT_LOG_DATA:
3051 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
3052 case MPI3_EVENT_ENCL_DEVICE_ADDED:
3053 {
3054 process_evt_bh = 1;
3055 break;
3056 }
3057 case MPI3_EVENT_ENERGY_PACK_CHANGE:
3058 {
3059 mpi3mr_energypackchg_evt_th(mrioc, event_reply);
3060 break;
3061 }
3062 case MPI3_EVENT_CABLE_MGMT:
3063 {
3064 mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
3065 break;
3066 }
3067 case MPI3_EVENT_SAS_DISCOVERY:
3068 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
3069 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
3070 case MPI3_EVENT_PCIE_ENUMERATION:
3071 break;
3072 default:
3073 ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
3074 __func__, evt_type);
3075 break;
3076 }
3077 if (process_evt_bh || ack_req) {
3078 dprint_event_th(mrioc,
3079 "scheduling bottom half handler for event(0x%02x),ack_required=%d\n",
3080 evt_type, ack_req);
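		/*
		 * event_data_length is in 4-byte (dword) units per the
		 * MPI3 spec, hence the multiply by 4 to get the byte
		 * count to copy.
		 */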
3081 sz = event_reply->event_data_length * 4;
3082 fwevt = mpi3mr_alloc_fwevt(sz);
3083 if (!fwevt) {
3084 dprint_event_th(mrioc,
3085 "failed to schedule bottom half handler for\n"
3086 "event(0x%02x), ack_required=%d\n", evt_type, ack_req);
3087 return;
3088 }
3089
3090 memcpy(fwevt->event_data, event_reply->event_data, sz);
3091 fwevt->mrioc = mrioc;
3092 fwevt->event_id = evt_type;
3093 fwevt->send_ack = ack_req;
3094 fwevt->process_evt = process_evt_bh;
3095 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
3096 mpi3mr_fwevt_add_to_list(mrioc, fwevt);
3097 }
3098 }
3099
3100 /**
3101 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
3102 * @mrioc: Adapter instance reference
3103 * @scmd: SCSI command reference
3104 * @scsiio_req: MPI3 SCSI IO request
3105 *
3106 * Identifies the protection information flags from the SCSI
3107 * command and sets the appropriate flags in the MPI3 SCSI IO
3108 * request.
3109 *
3110 * Return: Nothing
3111 */
3112 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
3113 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3114 {
3115 u16 eedp_flags = 0;
3116 unsigned char prot_op = scsi_get_prot_op(scmd);
3117
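	/*
	 * Roughly: STRIP operations check and remove protection
	 * information on the way through, INSERT operations generate
	 * it, and PASS operations carry it between host and device
	 * with checking; operations where the host supplies or
	 * receives PI also set METASGL_VALID so a separate metadata
	 * SGL is built for the request.
	 */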
3118 switch (prot_op) {
3119 case SCSI_PROT_NORMAL:
3120 return;
3121 case SCSI_PROT_READ_STRIP:
3122 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
3123 break;
3124 case SCSI_PROT_WRITE_INSERT:
3125 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
3126 break;
3127 case SCSI_PROT_READ_INSERT:
3128 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
3129 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3130 break;
3131 case SCSI_PROT_WRITE_STRIP:
3132 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
3133 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3134 break;
3135 case SCSI_PROT_READ_PASS:
3136 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
3137 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3138 break;
3139 case SCSI_PROT_WRITE_PASS:
3140 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
3141 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
3142 scsiio_req->sgl[0].eedp.application_tag_translation_mask =
3143 0xffff;
3144 } else
3145 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
3146
3147 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3148 break;
3149 default:
3150 return;
3151 }
3152
3153 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
3154 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
3155
3156 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
3157 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
3158
3159 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
3160 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
3161 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
3162 scsiio_req->cdb.eedp32.primary_reference_tag =
3163 cpu_to_be32(scsi_prot_ref_tag(scmd));
3164 }
3165
3166 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
3167 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
3168
3169 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
3170
3171 switch (scsi_prot_interval(scmd)) {
3172 case 512:
3173 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
3174 break;
3175 case 520:
3176 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
3177 break;
3178 case 4080:
3179 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
3180 break;
3181 case 4088:
3182 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
3183 break;
3184 case 4096:
3185 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
3186 break;
3187 case 4104:
3188 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
3189 break;
3190 case 4160:
3191 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
3192 break;
3193 default:
3194 break;
3195 }
3196
3197 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
3198 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
3199 }
3200
3201 /**
3202 * mpi3mr_build_sense_buffer - Map sense information
3203 * @desc: Sense type
3204 * @buf: Sense buffer to populate
3205 * @key: Sense key
3206 * @asc: Additional sense code
3207 * @ascq: Additional sense code qualifier
3208 *
3209 * Maps the given sense information into either descriptor or
3210 * fixed format sense data.
3211 *
3212 * Return: Nothing
3213 */
3214 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
3215 u8 asc, u8 ascq)
3216 {
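	/*
	 * Descriptor format (response code 0x72): key/asc/ascq live in
	 * bytes 1-3. Fixed format (response code 0x70): key is byte 2,
	 * asc/ascq are bytes 12-13, and byte 7 (additional sense
	 * length) is 0xa so those bytes are included. Both are
	 * "current" sense per SPC.
	 */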
3217 if (desc) {
3218 buf[0] = 0x72; /* descriptor, current */
3219 buf[1] = key;
3220 buf[2] = asc;
3221 buf[3] = ascq;
3222 buf[7] = 0;
3223 } else {
3224 buf[0] = 0x70; /* fixed, current */
3225 buf[2] = key;
3226 buf[7] = 0xa;
3227 buf[12] = asc;
3228 buf[13] = ascq;
3229 }
3230 }
3231
3232 /**
3233 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
3234 * @scmd: SCSI command reference
3235 * @ioc_status: status of MPI3 request
3236 *
3237 * Maps the EEDP error status of the SCSI IO request to sense
3238 * data.
3239 *
3240 * Return: Nothing
3241 */
3242 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
3243 u16 ioc_status)
3244 {
3245 u8 ascq = 0;
3246
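	/*
	 * These ASCQ values pair with sense key ILLEGAL_REQUEST and
	 * ASC 0x10 below, the standard T10 PI errors: 0x01 logical
	 * block guard check failed, 0x02 application tag check failed,
	 * 0x03 reference tag check failed.
	 */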
3247 switch (ioc_status) {
3248 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
3249 ascq = 0x01;
3250 break;
3251 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
3252 ascq = 0x02;
3253 break;
3254 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
3255 ascq = 0x03;
3256 break;
3257 default:
3258 ascq = 0x00;
3259 break;
3260 }
3261
3262 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
3263 0x10, ascq);
3264 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
3265 }
3266
3267 /**
3268 * mpi3mr_process_op_reply_desc - reply descriptor handler
3269 * @mrioc: Adapter instance reference
3270 * @reply_desc: Operational reply descriptor
3271 * @reply_dma: place holder for reply DMA address
3272 * @qidx: Operational queue index
3273 *
3274 * Processes the operational reply descriptor and identifies the
3275 * descriptor type. Based on the descriptor, maps the MPI3 request
3276 * status to a SCSI command status and calls the scsi_done
3277 * callback.
3278 *
3279 * Return: Nothing
3280 */
3281 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
3282 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
3283 {
3284 u16 reply_desc_type, host_tag = 0;
3285 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
3286 u32 ioc_loginfo = 0;
3287 struct mpi3_status_reply_descriptor *status_desc = NULL;
3288 struct mpi3_address_reply_descriptor *addr_desc = NULL;
3289 struct mpi3_success_reply_descriptor *success_desc = NULL;
3290 struct mpi3_scsi_io_reply *scsi_reply = NULL;
3291 struct scsi_cmnd *scmd = NULL;
3292 struct scmd_priv *priv = NULL;
3293 u8 *sense_buf = NULL;
3294 u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
3295 u32 xfer_count = 0, sense_count = 0, resp_data = 0;
3296 u16 dev_handle = 0xFFFF;
3297 struct scsi_sense_hdr sshdr;
3298 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
3299 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3300 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
3301 struct mpi3mr_throttle_group_info *tg = NULL;
3302 u8 throttle_enabled_dev = 0;
3303
3304 *reply_dma = 0;
3305 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
3306 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
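	/*
	 * Three descriptor flavors are handled below: a status
	 * descriptor carries ioc_status inline, an address reply
	 * descriptor points (via a DMA address) to a full SCSI IO
	 * reply frame, and a success descriptor carries only the host
	 * tag.
	 */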
3307 switch (reply_desc_type) {
3308 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
3309 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
3310 host_tag = le16_to_cpu(status_desc->host_tag);
3311 ioc_status = le16_to_cpu(status_desc->ioc_status);
3312 if (ioc_status &
3313 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
3314 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
3315 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3316 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
3317 break;
3318 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
3319 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
3320 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
3321 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
3322 *reply_dma);
3323 if (!scsi_reply) {
3324 panic("%s: scsi_reply is NULL, this shouldn't happen\n",
3325 mrioc->name);
3326 goto out;
3327 }
3328 host_tag = le16_to_cpu(scsi_reply->host_tag);
3329 ioc_status = le16_to_cpu(scsi_reply->ioc_status);
3330 scsi_status = scsi_reply->scsi_status;
3331 scsi_state = scsi_reply->scsi_state;
3332 dev_handle = le16_to_cpu(scsi_reply->dev_handle);
3333 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
3334 xfer_count = le32_to_cpu(scsi_reply->transfer_count);
3335 sense_count = le32_to_cpu(scsi_reply->sense_count);
3336 resp_data = le32_to_cpu(scsi_reply->response_data);
3337 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
3338 le64_to_cpu(scsi_reply->sense_data_buffer_address));
3339 if (ioc_status &
3340 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
3341 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
3342 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3343 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
3344 panic("%s: Ran out of sense buffers\n", mrioc->name);
3345 if (sense_buf) {
3346 scsi_normalize_sense(sense_buf, sense_count, &sshdr);
3347 mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
3348 sshdr.asc, sshdr.ascq);
3349 }
3350 mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
3351 break;
3352 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
3353 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
3354 host_tag = le16_to_cpu(success_desc->host_tag);
3355 break;
3356 default:
3357 break;
3358 }
3359 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
3360 if (!scmd) {
3361 panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
3362 mrioc->name, host_tag);
3363 goto out;
3364 }
3365 priv = scsi_cmd_priv(scmd);
3366
3367 data_len_blks = scsi_bufflen(scmd) >> 9;
3368 sdev_priv_data = scmd->device->hostdata;
3369 if (sdev_priv_data) {
3370 stgt_priv_data = sdev_priv_data->tgt_priv_data;
3371 if (stgt_priv_data) {
3372 tg = stgt_priv_data->throttle_group;
3373 throttle_enabled_dev =
3374 stgt_priv_data->io_throttle_enabled;
3375 dev_handle = stgt_priv_data->dev_handle;
3376 }
3377 }
3378 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
3379 throttle_enabled_dev)) {
3380 ioc_pend_data_len = atomic_sub_return(data_len_blks,
3381 &mrioc->pend_large_data_sz);
3382 if (tg) {
3383 tg_pend_data_len = atomic_sub_return(data_len_blks,
3384 &tg->pend_large_data_sz);
3385 if (tg->io_divert && ((ioc_pend_data_len <=
3386 mrioc->io_throttle_low) &&
3387 (tg_pend_data_len <= tg->low))) {
3388 tg->io_divert = 0;
3389 mpi3mr_set_io_divert_for_all_vd_in_tg(
3390 mrioc, tg, 0);
3391 }
3392 } else {
3393 if (ioc_pend_data_len <= mrioc->io_throttle_low)
3394 stgt_priv_data->io_divert = 0;
3395 }
3396 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
3397 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
3398 if (!tg) {
3399 if (ioc_pend_data_len <= mrioc->io_throttle_low)
3400 stgt_priv_data->io_divert = 0;
3401
3402 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
3403 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
3404 if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
3405 tg->io_divert = 0;
3406 mpi3mr_set_io_divert_for_all_vd_in_tg(
3407 mrioc, tg, 0);
3408 }
3409 }
3410 }
3411
3412 if (success_desc) {
3413 scmd->result = DID_OK << 16;
3414 goto out_success;
3415 }
3416
3417 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
3418 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
3419 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
3420 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
3421 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
3422 ioc_status = MPI3_IOCSTATUS_SUCCESS;
3423
3424 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
3425 sense_buf) {
3426 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);
3427
3428 memcpy(scmd->sense_buffer, sense_buf, sz);
3429 }
3430
3431 switch (ioc_status) {
3432 case MPI3_IOCSTATUS_BUSY:
3433 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
3434 scmd->result = SAM_STAT_BUSY;
3435 break;
3436 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3437 scmd->result = DID_NO_CONNECT << 16;
3438 break;
3439 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
3440 if (ioc_loginfo == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
3441 /*
3442 * This is a ATA NCQ command aborted due to another NCQ
3443 * command failure. We must retry this command
3444 * immediately but without incrementing its retry
3445 * counter.
3446 */
3447 WARN_ON_ONCE(xfer_count != 0);
3448 scmd->result = DID_IMM_RETRY << 16;
3449 } else {
3450 scmd->result = DID_SOFT_ERROR << 16;
3451 }
3452 break;
3453 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
3454 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
3455 scmd->result = DID_RESET << 16;
3456 break;
3457 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3458 if ((xfer_count == 0) || (scmd->underflow > xfer_count))
3459 scmd->result = DID_SOFT_ERROR << 16;
3460 else
3461 scmd->result = (DID_OK << 16) | scsi_status;
3462 break;
3463 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
3464 scmd->result = (DID_OK << 16) | scsi_status;
3465 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
3466 break;
3467 if (xfer_count < scmd->underflow) {
3468 if (scsi_status == SAM_STAT_BUSY)
3469 scmd->result = SAM_STAT_BUSY;
3470 else
3471 scmd->result = DID_SOFT_ERROR << 16;
3472 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
3473 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
3474 scmd->result = DID_SOFT_ERROR << 16;
3475 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
3476 scmd->result = DID_RESET << 16;
3477 break;
3478 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
3479 scsi_set_resid(scmd, 0);
3480 fallthrough;
3481 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
3482 case MPI3_IOCSTATUS_SUCCESS:
3483 scmd->result = (DID_OK << 16) | scsi_status;
3484 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
3485 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
3486 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
3487 scmd->result = DID_SOFT_ERROR << 16;
3488 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
3489 scmd->result = DID_RESET << 16;
3490 break;
3491 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
3492 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
3493 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
3494 mpi3mr_map_eedp_error(scmd, ioc_status);
3495 break;
3496 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3497 case MPI3_IOCSTATUS_INVALID_FUNCTION:
3498 case MPI3_IOCSTATUS_INVALID_SGL:
3499 case MPI3_IOCSTATUS_INTERNAL_ERROR:
3500 case MPI3_IOCSTATUS_INVALID_FIELD:
3501 case MPI3_IOCSTATUS_INVALID_STATE:
3502 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
3503 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3504 case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
3505 default:
3506 scmd->result = DID_SOFT_ERROR << 16;
3507 break;
3508 }
3509
3510 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
3511 (scmd->cmnd[0] != ATA_16) &&
3512 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) {
3513 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
3514 scmd->result);
3515 scsi_print_command(scmd);
3516 ioc_info(mrioc,
3517 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
3518 __func__, dev_handle, ioc_status, ioc_loginfo,
3519 priv->req_q_idx + 1);
3520 ioc_info(mrioc,
3521 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
3522 host_tag, scsi_state, scsi_status, xfer_count, resp_data);
3523 if (sense_buf) {
3524 scsi_normalize_sense(sense_buf, sense_count, &sshdr);
3525 ioc_info(mrioc,
3526 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
3527 __func__, sense_count, sshdr.sense_key,
3528 sshdr.asc, sshdr.ascq);
3529 }
3530 }
3531 out_success:
3532 if (priv->meta_sg_valid) {
3533 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
3534 scsi_prot_sg_count(scmd), scmd->sc_data_direction);
3535 }
3536 mpi3mr_clear_scmd_priv(mrioc, scmd);
3537 scsi_dma_unmap(scmd);
3538 scsi_done(scmd);
3539 out:
3540 if (sense_buf)
3541 mpi3mr_repost_sense_buf(mrioc,
3542 le64_to_cpu(scsi_reply->sense_data_buffer_address));
3543 }
3544
3545 /**
3546 * mpi3mr_get_chain_idx - get free chain buffer index
3547 * @mrioc: Adapter instance reference
3548 *
3549 * Try to get a free chain buffer index from the free pool.
3550 *
3551 * Return: -1 on failure or the free chain buffer index
3552 */
mpi3mr_get_chain_idx(struct mpi3mr_ioc * mrioc)3553 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
3554 {
3555 u8 retry_count = 5;
3556 int cmd_idx = -1;
3557 unsigned long flags;
3558
3559 spin_lock_irqsave(&mrioc->chain_buf_lock, flags);
3560 do {
3561 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
3562 mrioc->chain_buf_count);
3563 if (cmd_idx < mrioc->chain_buf_count) {
3564 set_bit(cmd_idx, mrioc->chain_bitmap);
3565 break;
3566 }
3567 cmd_idx = -1;
3568 } while (retry_count--);
3569 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags);
3570 return cmd_idx;
3571 }
3572
3573 /**
3574 * mpi3mr_prepare_sg_scmd - build scatter gather list
3575 * @mrioc: Adapter instance reference
3576 * @scmd: SCSI command reference
3577 * @scsiio_req: MPI3 SCSI IO request
3578 *
3579 * This function maps SCSI command's data and protection SGEs to
3580 * MPI request SGEs. If required additional 4K chain buffer is
3581 * used to send the SGEs.
3582 *
3583 * Return: 0 on success, -ENOMEM on dma_map_sg failure
3584 */
mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc * mrioc,struct scsi_cmnd * scmd,struct mpi3_scsi_io_request * scsiio_req)3585 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
3586 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3587 {
3588 dma_addr_t chain_dma;
3589 struct scatterlist *sg_scmd;
3590 void *sg_local, *chain;
3591 u32 chain_length;
3592 int sges_left, chain_idx;
3593 u32 sges_in_segment;
3594 u8 simple_sgl_flags;
3595 u8 simple_sgl_flags_last;
3596 u8 last_chain_sgl_flags;
3597 struct chain_element *chain_req;
3598 struct scmd_priv *priv = NULL;
3599 u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
3600 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;
3601
3602 priv = scsi_cmd_priv(scmd);
3603
3604 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
3605 MPI3_SGE_FLAGS_DLAS_SYSTEM;
3606 simple_sgl_flags_last = simple_sgl_flags |
3607 MPI3_SGE_FLAGS_END_OF_LIST;
3608 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
3609 MPI3_SGE_FLAGS_DLAS_SYSTEM;
3610
3611 if (meta_sg)
3612 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
3613 else
3614 sg_local = &scsiio_req->sgl;
3615
3616 if (!scsiio_req->data_length && !meta_sg) {
3617 mpi3mr_build_zero_len_sge(sg_local);
3618 return 0;
3619 }
3620
3621 if (meta_sg) {
3622 sg_scmd = scsi_prot_sglist(scmd);
3623 sges_left = dma_map_sg(&mrioc->pdev->dev,
3624 scsi_prot_sglist(scmd),
3625 scsi_prot_sg_count(scmd),
3626 scmd->sc_data_direction);
3627 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
3628 } else {
3629 /*
3630 * Some firmware versions byte-swap the REPORT ZONES command
3631 * reply from ATA-ZAC devices by directly accessing in the host
3632 * buffer. This does not respect the default command DMA
3633 * direction and causes IOMMU page faults on some architectures
3634 * with an IOMMU enforcing write mappings (e.g. AMD hosts).
3635 * Avoid such issue by making the REPORT ZONES buffer mapping
3636 * bi-directional.
3637 */
3638 if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES)
3639 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
3640 sg_scmd = scsi_sglist(scmd);
3641 sges_left = scsi_dma_map(scmd);
3642 }
3643
3644 if (sges_left < 0) {
3645 sdev_printk(KERN_ERR, scmd->device,
3646 "scsi_dma_map failed: request for %d bytes!\n",
3647 scsi_bufflen(scmd));
3648 return -ENOMEM;
3649 }
3650 if (sges_left > mrioc->max_sgl_entries) {
3651 sdev_printk(KERN_ERR, scmd->device,
3652 "scsi_dma_map returned unsupported sge count %d!\n",
3653 sges_left);
3654 return -ENOMEM;
3655 }
3656
3657 sges_in_segment = (mrioc->facts.op_req_sz -
3658 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);
3659
3660 if (scsiio_req->sgl[0].eedp.flags ==
3661 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
3662 sg_local += sizeof(struct mpi3_sge_common);
3663 sges_in_segment--;
3664 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
3665 }
3666
3667 if (scsiio_req->msg_flags ==
3668 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
3669 sges_in_segment--;
3670 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
3671 }
3672
3673 if (meta_sg)
3674 sges_in_segment = 1;
3675
3676 if (sges_left <= sges_in_segment)
3677 goto fill_in_last_segment;
3678
3679 /* fill in main message segment when there is a chain following */
3680 while (sges_in_segment > 1) {
3681 mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
3682 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
3683 sg_scmd = sg_next(sg_scmd);
3684 sg_local += sizeof(struct mpi3_sge_common);
3685 sges_left--;
3686 sges_in_segment--;
3687 }
3688
3689 chain_idx = mpi3mr_get_chain_idx(mrioc);
3690 if (chain_idx < 0)
3691 return -1;
3692 chain_req = &mrioc->chain_sgl_list[chain_idx];
3693 if (meta_sg)
3694 priv->meta_chain_idx = chain_idx;
3695 else
3696 priv->chain_idx = chain_idx;
3697
3698 chain = chain_req->addr;
3699 chain_dma = chain_req->dma_addr;
3700 sges_in_segment = sges_left;
3701 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);
3702
3703 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
3704 chain_length, chain_dma);
3705
3706 sg_local = chain;
3707
3708 fill_in_last_segment:
3709 while (sges_left > 0) {
3710 if (sges_left == 1)
3711 mpi3mr_add_sg_single(sg_local,
3712 simple_sgl_flags_last, sg_dma_len(sg_scmd),
3713 sg_dma_address(sg_scmd));
3714 else
3715 mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
3716 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
3717 sg_scmd = sg_next(sg_scmd);
3718 sg_local += sizeof(struct mpi3_sge_common);
3719 sges_left--;
3720 }
3721
3722 return 0;
3723 }
3724
3725 /**
3726 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
3727 * @mrioc: Adapter instance reference
3728 * @scmd: SCSI command reference
3729 * @scsiio_req: MPI3 SCSI IO request
3730 *
3731 * This function calls mpi3mr_prepare_sg_scmd for constructing
3732 * both data SGEs and protection information SGEs in the MPI
3733 * format from the SCSI Command as appropriate .
3734 *
3735 * Return: return value of mpi3mr_prepare_sg_scmd.
3736 */
mpi3mr_build_sg_scmd(struct mpi3mr_ioc * mrioc,struct scsi_cmnd * scmd,struct mpi3_scsi_io_request * scsiio_req)3737 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
3738 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3739 {
3740 int ret;
3741
3742 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3743 if (ret)
3744 return ret;
3745
3746 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
3747 /* There is a valid meta sg */
3748 scsiio_req->flags |=
3749 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
3750 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3751 }
3752
3753 return ret;
3754 }
3755
3756 /**
3757 * mpi3mr_tm_response_name - get TM response as a string
3758 * @resp_code: TM response code
3759 *
3760 * Convert known task management response code as a readable
3761 * string.
3762 *
3763 * Return: response code string.
3764 */
mpi3mr_tm_response_name(u8 resp_code)3765 static const char *mpi3mr_tm_response_name(u8 resp_code)
3766 {
3767 char *desc;
3768
3769 switch (resp_code) {
3770 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
3771 desc = "task management request completed";
3772 break;
3773 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
3774 desc = "invalid frame";
3775 break;
3776 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
3777 desc = "task management request not supported";
3778 break;
3779 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
3780 desc = "task management request failed";
3781 break;
3782 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
3783 desc = "task management request succeeded";
3784 break;
3785 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
3786 desc = "invalid LUN";
3787 break;
3788 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
3789 desc = "overlapped tag attempted";
3790 break;
3791 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
3792 desc = "task queued, however not sent to target";
3793 break;
3794 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
3795 desc = "task management request denied by NVMe device";
3796 break;
3797 default:
3798 desc = "unknown";
3799 break;
3800 }
3801
3802 return desc;
3803 }
3804
mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc * mrioc)3805 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
3806 {
3807 int i;
3808 int num_of_reply_queues =
3809 mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
3810
3811 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
3812 mpi3mr_process_op_reply_q(mrioc,
3813 mrioc->intr_info[i].op_reply_q);
3814 }
3815
3816 /**
3817 * mpi3mr_issue_tm - Issue Task Management request
3818 * @mrioc: Adapter instance reference
3819 * @tm_type: Task Management type
3820 * @handle: Device handle
3821 * @lun: lun ID
3822 * @htag: Host tag of the TM request
3823 * @timeout: TM timeout value
3824 * @drv_cmd: Internal command tracker
3825 * @resp_code: Response code place holder
3826 * @scmd: SCSI command
3827 *
3828 * Issues a Task Management Request to the controller for a
3829 * specified target, lun and command and wait for its completion
3830 * and check TM response. Recover the TM if it timed out by
3831 * issuing controller reset.
3832 *
3833 * Return: 0 on success, non-zero on errors
3834 */
mpi3mr_issue_tm(struct mpi3mr_ioc * mrioc,u8 tm_type,u16 handle,uint lun,u16 htag,ulong timeout,struct mpi3mr_drv_cmd * drv_cmd,u8 * resp_code,struct scsi_cmnd * scmd)3835 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
3836 u16 handle, uint lun, u16 htag, ulong timeout,
3837 struct mpi3mr_drv_cmd *drv_cmd,
3838 u8 *resp_code, struct scsi_cmnd *scmd)
3839 {
3840 struct mpi3_scsi_task_mgmt_request tm_req;
3841 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
3842 int retval = 0;
3843 struct mpi3mr_tgt_dev *tgtdev = NULL;
3844 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
3845 struct scmd_priv *cmd_priv = NULL;
3846 struct scsi_device *sdev = NULL;
3847 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3848
3849 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
3850 __func__, tm_type, handle);
3851 if (mrioc->unrecoverable) {
3852 retval = -1;
3853 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
3854 __func__);
3855 goto out;
3856 }
3857
3858 memset(&tm_req, 0, sizeof(tm_req));
3859 mutex_lock(&drv_cmd->mutex);
3860 if (drv_cmd->state & MPI3MR_CMD_PENDING) {
3861 retval = -1;
3862 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
3863 mutex_unlock(&drv_cmd->mutex);
3864 goto out;
3865 }
3866 if (mrioc->reset_in_progress) {
3867 retval = -1;
3868 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
3869 mutex_unlock(&drv_cmd->mutex);
3870 goto out;
3871 }
3872 if (mrioc->block_on_pci_err) {
3873 retval = -1;
3874 dprint_tm(mrioc, "sending task management failed due to\n"
3875 "pci error recovery in progress\n");
3876 mutex_unlock(&drv_cmd->mutex);
3877 goto out;
3878 }
3879
3880 drv_cmd->state = MPI3MR_CMD_PENDING;
3881 drv_cmd->is_waiting = 1;
3882 drv_cmd->callback = NULL;
3883 tm_req.dev_handle = cpu_to_le16(handle);
3884 tm_req.task_type = tm_type;
3885 tm_req.host_tag = cpu_to_le16(htag);
3886
3887 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
3888 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
3889
3890 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
3891
3892 if (scmd) {
3893 if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
3894 cmd_priv = scsi_cmd_priv(scmd);
3895 if (!cmd_priv)
3896 goto out_unlock;
3897
3898 struct op_req_qinfo *op_req_q;
3899
3900 op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
3901 tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
3902 tm_req.task_request_queue_id =
3903 cpu_to_le16(op_req_q->qid);
3904 }
3905 sdev = scmd->device;
3906 sdev_priv_data = sdev->hostdata;
3907 scsi_tgt_priv_data = ((sdev_priv_data) ?
3908 sdev_priv_data->tgt_priv_data : NULL);
3909 } else {
3910 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
3911 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
3912 tgtdev->starget->hostdata;
3913 }
3914
3915 if (scsi_tgt_priv_data)
3916 atomic_inc(&scsi_tgt_priv_data->block_io);
3917
3918 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
3919 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
3920 timeout = tgtdev->dev_spec.pcie_inf.abort_to;
3921 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
3922 timeout = tgtdev->dev_spec.pcie_inf.reset_to;
3923 }
3924
3925 init_completion(&drv_cmd->done);
3926 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
3927 if (retval) {
3928 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
3929 goto out_unlock;
3930 }
3931 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
3932
3933 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
3934 drv_cmd->is_waiting = 0;
3935 retval = -1;
3936 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
3937 dprint_tm(mrioc,
3938 "task management request timed out after %ld seconds\n",
3939 timeout);
3940 if (mrioc->logging_level & MPI3_DEBUG_TM)
3941 dprint_dump_req(&tm_req, sizeof(tm_req)/4);
3942 mpi3mr_soft_reset_handler(mrioc,
3943 MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
3944 }
3945 goto out_unlock;
3946 }
3947
3948 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
3949 dprint_tm(mrioc, "invalid task management reply message\n");
3950 retval = -1;
3951 goto out_unlock;
3952 }
3953
3954 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
3955
3956 switch (drv_cmd->ioc_status) {
3957 case MPI3_IOCSTATUS_SUCCESS:
3958 *resp_code = le32_to_cpu(tm_reply->response_data) &
3959 MPI3MR_RI_MASK_RESPCODE;
3960 break;
3961 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
3962 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
3963 break;
3964 default:
3965 dprint_tm(mrioc,
3966 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
3967 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
3968 retval = -1;
3969 goto out_unlock;
3970 }
3971
3972 switch (*resp_code) {
3973 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
3974 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
3975 break;
3976 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
3977 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3978 retval = -1;
3979 break;
3980 default:
3981 retval = -1;
3982 break;
3983 }
3984
3985 dprint_tm(mrioc,
3986 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
3987 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
3988 le32_to_cpu(tm_reply->termination_count),
3989 mpi3mr_tm_response_name(*resp_code), *resp_code);
3990
3991 if (!retval) {
3992 mpi3mr_ioc_disable_intr(mrioc);
3993 mpi3mr_poll_pend_io_completions(mrioc);
3994 mpi3mr_ioc_enable_intr(mrioc);
3995 mpi3mr_poll_pend_io_completions(mrioc);
3996 mpi3mr_process_admin_reply_q(mrioc);
3997 }
3998 switch (tm_type) {
3999 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
4000 if (!scsi_tgt_priv_data)
4001 break;
4002 scsi_tgt_priv_data->pend_count = 0;
4003 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
4004 mpi3mr_count_tgt_pending,
4005 (void *)scsi_tgt_priv_data->starget);
4006 break;
4007 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
4008 if (!sdev_priv_data)
4009 break;
4010 sdev_priv_data->pend_count = 0;
4011 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
4012 mpi3mr_count_dev_pending, (void *)sdev);
4013 break;
4014 default:
4015 break;
4016 }
4017 mpi3mr_global_trigger(mrioc,
4018 MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED);
4019
4020 out_unlock:
4021 drv_cmd->state = MPI3MR_CMD_NOTUSED;
4022 mutex_unlock(&drv_cmd->mutex);
4023 if (scsi_tgt_priv_data)
4024 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
4025 if (tgtdev)
4026 mpi3mr_tgtdev_put(tgtdev);
4027 out:
4028 return retval;
4029 }
4030
4031 /**
4032 * mpi3mr_bios_param - BIOS param callback
4033 * @sdev: SCSI device reference
4034 * @bdev: Block device reference
4035 * @capacity: Capacity in logical sectors
4036 * @params: Parameter array
4037 *
4038 * Just the parameters with heads/secots/cylinders.
4039 *
4040 * Return: 0 always
4041 */
mpi3mr_bios_param(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int params[])4042 static int mpi3mr_bios_param(struct scsi_device *sdev,
4043 struct block_device *bdev, sector_t capacity, int params[])
4044 {
4045 int heads;
4046 int sectors;
4047 sector_t cylinders;
4048 ulong dummy;
4049
4050 heads = 64;
4051 sectors = 32;
4052
4053 dummy = heads * sectors;
4054 cylinders = capacity;
4055 sector_div(cylinders, dummy);
4056
4057 if ((ulong)capacity >= 0x200000) {
4058 heads = 255;
4059 sectors = 63;
4060 dummy = heads * sectors;
4061 cylinders = capacity;
4062 sector_div(cylinders, dummy);
4063 }
4064
4065 params[0] = heads;
4066 params[1] = sectors;
4067 params[2] = cylinders;
4068 return 0;
4069 }
4070
4071 /**
4072 * mpi3mr_map_queues - Map queues callback handler
4073 * @shost: SCSI host reference
4074 *
4075 * Maps default and poll queues.
4076 *
4077 * Return: return zero.
4078 */
mpi3mr_map_queues(struct Scsi_Host * shost)4079 static void mpi3mr_map_queues(struct Scsi_Host *shost)
4080 {
4081 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4082 int i, qoff, offset;
4083 struct blk_mq_queue_map *map = NULL;
4084
4085 offset = mrioc->op_reply_q_offset;
4086
4087 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
4088 map = &shost->tag_set.map[i];
4089
4090 map->nr_queues = 0;
4091
4092 if (i == HCTX_TYPE_DEFAULT)
4093 map->nr_queues = mrioc->default_qcount;
4094 else if (i == HCTX_TYPE_POLL)
4095 map->nr_queues = mrioc->active_poll_qcount;
4096
4097 if (!map->nr_queues) {
4098 BUG_ON(i == HCTX_TYPE_DEFAULT);
4099 continue;
4100 }
4101
4102 /*
4103 * The poll queue(s) doesn't have an IRQ (and hence IRQ
4104 * affinity), so use the regular blk-mq cpu mapping
4105 */
4106 map->queue_offset = qoff;
4107 if (i != HCTX_TYPE_POLL)
4108 blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
4109 else
4110 blk_mq_map_queues(map);
4111
4112 qoff += map->nr_queues;
4113 offset += map->nr_queues;
4114 }
4115 }
4116
4117 /**
4118 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
4119 * @mrioc: Adapter instance reference
4120 *
4121 * Calculate the pending I/Os for the controller and return.
4122 *
4123 * Return: Number of pending I/Os
4124 */
mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc * mrioc)4125 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
4126 {
4127 u16 i;
4128 uint pend_ios = 0;
4129
4130 for (i = 0; i < mrioc->num_op_reply_q; i++)
4131 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
4132 return pend_ios;
4133 }
4134
4135 /**
4136 * mpi3mr_print_pending_host_io - print pending I/Os
4137 * @mrioc: Adapter instance reference
4138 *
4139 * Print number of pending I/Os and each I/O details prior to
4140 * reset for debug purpose.
4141 *
4142 * Return: Nothing
4143 */
mpi3mr_print_pending_host_io(struct mpi3mr_ioc * mrioc)4144 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
4145 {
4146 struct Scsi_Host *shost = mrioc->shost;
4147
4148 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
4149 __func__, mpi3mr_get_fw_pending_ios(mrioc));
4150 blk_mq_tagset_busy_iter(&shost->tag_set,
4151 mpi3mr_print_scmd, (void *)mrioc);
4152 }
4153
4154 /**
4155 * mpi3mr_wait_for_host_io - block for I/Os to complete
4156 * @mrioc: Adapter instance reference
4157 * @timeout: time out in seconds
4158 * Waits for pending I/Os for the given adapter to complete or
4159 * to hit the timeout.
4160 *
4161 * Return: Nothing
4162 */
mpi3mr_wait_for_host_io(struct mpi3mr_ioc * mrioc,u32 timeout)4163 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
4164 {
4165 enum mpi3mr_iocstate iocstate;
4166 int i = 0;
4167
4168 iocstate = mpi3mr_get_iocstate(mrioc);
4169 if (iocstate != MRIOC_STATE_READY)
4170 return;
4171
4172 if (!mpi3mr_get_fw_pending_ios(mrioc))
4173 return;
4174 ioc_info(mrioc,
4175 "%s :Waiting for %d seconds prior to reset for %d I/O\n",
4176 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
4177
4178 for (i = 0; i < timeout; i++) {
4179 if (!mpi3mr_get_fw_pending_ios(mrioc))
4180 break;
4181 iocstate = mpi3mr_get_iocstate(mrioc);
4182 if (iocstate != MRIOC_STATE_READY)
4183 break;
4184 msleep(1000);
4185 }
4186
4187 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
4188 mpi3mr_get_fw_pending_ios(mrioc));
4189 }
4190
4191 /**
4192 * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
4193 * @mrioc: Adapter instance reference
4194 * @scmd: SCSI command reference
4195 * @scsiio_req: MPI3 SCSI IO request
4196 * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
4197 * @wslen: write same max length
4198 *
4199 * Gets values of unmap, ndob and number of blocks from write
4200 * same scsi io and based on these values it sets divert IO flag
4201 * and reason for diverting IO to firmware.
4202 *
4203 * Return: Nothing
4204 */
mpi3mr_setup_divert_ws(struct mpi3mr_ioc * mrioc,struct scsi_cmnd * scmd,struct mpi3_scsi_io_request * scsiio_req,u32 * scsiio_flags,u16 wslen)4205 static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
4206 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
4207 u32 *scsiio_flags, u16 wslen)
4208 {
4209 u8 unmap = 0, ndob = 0;
4210 u8 opcode = scmd->cmnd[0];
4211 u32 num_blocks = 0;
4212 u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);
4213
4214 if (opcode == WRITE_SAME_16) {
4215 unmap = scmd->cmnd[1] & 0x08;
4216 ndob = scmd->cmnd[1] & 0x01;
4217 num_blocks = get_unaligned_be32(scmd->cmnd + 10);
4218 } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
4219 unmap = scmd->cmnd[10] & 0x08;
4220 ndob = scmd->cmnd[10] & 0x01;
4221 num_blocks = get_unaligned_be32(scmd->cmnd + 28);
4222 } else
4223 return;
4224
4225 if ((unmap) && (ndob) && (num_blocks > wslen)) {
4226 scsiio_req->msg_flags |=
4227 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
4228 *scsiio_flags |=
4229 MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
4230 }
4231 }
4232
4233 /**
4234 * mpi3mr_eh_host_reset - Host reset error handling callback
4235 * @scmd: SCSI command reference
4236 *
4237 * Issue controller reset
4238 *
4239 * Return: SUCCESS of successful reset else FAILED
4240 */
mpi3mr_eh_host_reset(struct scsi_cmnd * scmd)4241 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
4242 {
4243 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4244 int retval = FAILED, ret;
4245
4246 ret = mpi3mr_soft_reset_handler(mrioc,
4247 MPI3MR_RESET_FROM_EH_HOS, 1);
4248 if (ret)
4249 goto out;
4250
4251 retval = SUCCESS;
4252 out:
4253 sdev_printk(KERN_INFO, scmd->device,
4254 "Host reset is %s for scmd(%p)\n",
4255 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4256
4257 return retval;
4258 }
4259
4260 /**
4261 * mpi3mr_eh_bus_reset - Bus reset error handling callback
4262 * @scmd: SCSI command reference
4263 *
4264 * Checks whether pending I/Os are present for the RAID volume;
4265 * if not there's no need to reset the adapter.
4266 *
4267 * Return: SUCCESS of successful reset else FAILED
4268 */
mpi3mr_eh_bus_reset(struct scsi_cmnd * scmd)4269 static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
4270 {
4271 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4272 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4273 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4274 u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
4275 int retval = FAILED;
4276 unsigned int timeout = MPI3MR_RESET_TIMEOUT;
4277
4278 sdev_priv_data = scmd->device->hostdata;
4279 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
4280 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4281 dev_type = stgt_priv_data->dev_type;
4282 }
4283
4284 if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
4285 mpi3mr_wait_for_host_io(mrioc,
4286 MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
4287 if (!mpi3mr_get_fw_pending_ios(mrioc)) {
4288 while (mrioc->reset_in_progress ||
4289 mrioc->prepare_for_reset ||
4290 mrioc->block_on_pci_err) {
4291 ssleep(1);
4292 if (!timeout--) {
4293 retval = FAILED;
4294 goto out;
4295 }
4296 }
4297 retval = SUCCESS;
4298 goto out;
4299 }
4300 }
4301 if (retval == FAILED)
4302 mpi3mr_print_pending_host_io(mrioc);
4303
4304 out:
4305 sdev_printk(KERN_INFO, scmd->device,
4306 "Bus reset is %s for scmd(%p)\n",
4307 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4308 return retval;
4309 }
4310
4311 /**
4312 * mpi3mr_eh_target_reset - Target reset error handling callback
4313 * @scmd: SCSI command reference
4314 *
4315 * Issue Target reset Task Management and verify the scmd is
4316 * terminated successfully and return status accordingly.
4317 *
4318 * Return: SUCCESS of successful termination of the scmd else
4319 * FAILED
4320 */
mpi3mr_eh_target_reset(struct scsi_cmnd * scmd)4321 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
4322 {
4323 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4324 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4325 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4326 u16 dev_handle;
4327 u8 resp_code = 0;
4328 int retval = FAILED, ret = 0;
4329
4330 sdev_printk(KERN_INFO, scmd->device,
4331 "Attempting Target Reset! scmd(%p)\n", scmd);
4332 scsi_print_command(scmd);
4333
4334 sdev_priv_data = scmd->device->hostdata;
4335 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4336 sdev_printk(KERN_INFO, scmd->device,
4337 "SCSI device is not available\n");
4338 retval = SUCCESS;
4339 goto out;
4340 }
4341
4342 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4343 dev_handle = stgt_priv_data->dev_handle;
4344 if (stgt_priv_data->dev_removed) {
4345 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4346 sdev_printk(KERN_INFO, scmd->device,
4347 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
4348 mrioc->name, dev_handle);
4349 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4350 retval = SUCCESS;
4351 else
4352 retval = FAILED;
4353 goto out;
4354 }
4355 sdev_printk(KERN_INFO, scmd->device,
4356 "Target Reset is issued to handle(0x%04x)\n",
4357 dev_handle);
4358
4359 ret = mpi3mr_issue_tm(mrioc,
4360 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
4361 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4362 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4363
4364 if (ret)
4365 goto out;
4366
4367 if (stgt_priv_data->pend_count) {
4368 sdev_printk(KERN_INFO, scmd->device,
4369 "%s: target has %d pending commands, target reset is failed\n",
4370 mrioc->name, stgt_priv_data->pend_count);
4371 goto out;
4372 }
4373
4374 retval = SUCCESS;
4375 out:
4376 sdev_printk(KERN_INFO, scmd->device,
4377 "%s: target reset is %s for scmd(%p)\n", mrioc->name,
4378 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4379
4380 return retval;
4381 }
4382
4383 /**
4384 * mpi3mr_eh_dev_reset- Device reset error handling callback
4385 * @scmd: SCSI command reference
4386 *
4387 * Issue lun reset Task Management and verify the scmd is
4388 * terminated successfully and return status accordingly.
4389 *
4390 * Return: SUCCESS of successful termination of the scmd else
4391 * FAILED
4392 */
mpi3mr_eh_dev_reset(struct scsi_cmnd * scmd)4393 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
4394 {
4395 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4396 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4397 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4398 u16 dev_handle;
4399 u8 resp_code = 0;
4400 int retval = FAILED, ret = 0;
4401
4402 sdev_printk(KERN_INFO, scmd->device,
4403 "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
4404 scsi_print_command(scmd);
4405
4406 sdev_priv_data = scmd->device->hostdata;
4407 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4408 sdev_printk(KERN_INFO, scmd->device,
4409 "SCSI device is not available\n");
4410 retval = SUCCESS;
4411 goto out;
4412 }
4413
4414 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4415 dev_handle = stgt_priv_data->dev_handle;
4416 if (stgt_priv_data->dev_removed) {
4417 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4418 sdev_printk(KERN_INFO, scmd->device,
4419 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
4420 mrioc->name, dev_handle);
4421 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4422 retval = SUCCESS;
4423 else
4424 retval = FAILED;
4425 goto out;
4426 }
4427 sdev_printk(KERN_INFO, scmd->device,
4428 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
4429
4430 ret = mpi3mr_issue_tm(mrioc,
4431 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
4432 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4433 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4434
4435 if (ret)
4436 goto out;
4437
4438 if (sdev_priv_data->pend_count) {
4439 sdev_printk(KERN_INFO, scmd->device,
4440 "%s: device has %d pending commands, device(LUN) reset is failed\n",
4441 mrioc->name, sdev_priv_data->pend_count);
4442 goto out;
4443 }
4444 retval = SUCCESS;
4445 out:
4446 sdev_printk(KERN_INFO, scmd->device,
4447 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
4448 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4449
4450 return retval;
4451 }
4452
4453 /**
4454 * mpi3mr_eh_abort - Callback function for abort error handling
4455 * @scmd: SCSI command reference
4456 *
4457 * Issues Abort Task Management if the command is in LLD scope
4458 * and verifies if it is aborted successfully, and return status
4459 * accordingly.
4460 *
4461 * Return: SUCCESS if the abort was successful, otherwise FAILED
4462 */
mpi3mr_eh_abort(struct scsi_cmnd * scmd)4463 static int mpi3mr_eh_abort(struct scsi_cmnd *scmd)
4464 {
4465 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4466 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4467 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4468 struct scmd_priv *cmd_priv;
4469 u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT;
4470 u8 resp_code = 0;
4471 int retval = FAILED, ret = 0;
4472 struct request *rq = scsi_cmd_to_rq(scmd);
4473 unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc);
4474 unsigned long scmd_age_sec = scmd_age_ms / HZ;
4475
4476 sdev_printk(KERN_INFO, scmd->device,
4477 "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd);
4478
4479 sdev_printk(KERN_INFO, scmd->device,
4480 "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n",
4481 mrioc->name, scmd, scmd_age_sec, scmd_age_ms % HZ, rq->timeout / HZ,
4482 scmd->retries, scmd->allowed);
4483
4484 scsi_print_command(scmd);
4485
4486 sdev_priv_data = scmd->device->hostdata;
4487 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4488 sdev_printk(KERN_INFO, scmd->device,
4489 "%s: Device not available, Skip issuing abort task\n",
4490 mrioc->name);
4491 retval = SUCCESS;
4492 goto out;
4493 }
4494
4495 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4496 dev_handle = stgt_priv_data->dev_handle;
4497
4498 cmd_priv = scsi_cmd_priv(scmd);
4499 if (!cmd_priv->in_lld_scope ||
4500 cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) {
4501 sdev_printk(KERN_INFO, scmd->device,
4502 "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n",
4503 mrioc->name, scmd);
4504 retval = SUCCESS;
4505 goto out;
4506 }
4507
4508 if (stgt_priv_data->dev_removed) {
4509 sdev_printk(KERN_INFO, scmd->device,
4510 "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n",
4511 mrioc->name, dev_handle);
4512 retval = FAILED;
4513 goto out;
4514 }
4515
4516 ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4517 dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4518 timeout, &mrioc->host_tm_cmds, &resp_code, scmd);
4519
4520 if (ret)
4521 goto out;
4522
4523 if (cmd_priv->in_lld_scope) {
4524 sdev_printk(KERN_INFO, scmd->device,
4525 "%s: Abort task failed. scmd (0x%p) was not terminated\n",
4526 mrioc->name, scmd);
4527 goto out;
4528 }
4529
4530 retval = SUCCESS;
4531 out:
4532 sdev_printk(KERN_INFO, scmd->device,
4533 "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name,
4534 ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd);
4535
4536 return retval;
4537 }
4538
4539 /**
4540 * mpi3mr_scan_start - Scan start callback handler
4541 * @shost: SCSI host reference
4542 *
4543 * Issue port enable request asynchronously.
4544 *
4545 * Return: Nothing
4546 */
mpi3mr_scan_start(struct Scsi_Host * shost)4547 static void mpi3mr_scan_start(struct Scsi_Host *shost)
4548 {
4549 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4550
4551 mrioc->scan_started = 1;
4552 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
4553 if (mpi3mr_issue_port_enable(mrioc, 1)) {
4554 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
4555 mrioc->scan_started = 0;
4556 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4557 }
4558 }
4559
4560 /**
4561 * mpi3mr_scan_finished - Scan finished callback handler
4562 * @shost: SCSI host reference
4563 * @time: Jiffies from the scan start
4564 *
4565 * Checks whether the port enable is completed or timedout or
4566 * failed and set the scan status accordingly after taking any
4567 * recovery if required.
4568 *
4569 * Return: 1 on scan finished or timed out, 0 for in progress
4570 */
mpi3mr_scan_finished(struct Scsi_Host * shost,unsigned long time)4571 static int mpi3mr_scan_finished(struct Scsi_Host *shost,
4572 unsigned long time)
4573 {
4574 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4575 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
4576 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4577
4578 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4579 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4580 ioc_err(mrioc, "port enable failed due to fault or reset\n");
4581 mpi3mr_print_fault_info(mrioc);
4582 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4583 mrioc->scan_started = 0;
4584 mrioc->init_cmds.is_waiting = 0;
4585 mrioc->init_cmds.callback = NULL;
4586 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4587 }
4588
4589 if (time >= (pe_timeout * HZ)) {
4590 ioc_err(mrioc, "port enable failed due to time out\n");
4591 mpi3mr_check_rh_fault_ioc(mrioc,
4592 MPI3MR_RESET_FROM_PE_TIMEOUT);
4593 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4594 mrioc->scan_started = 0;
4595 mrioc->init_cmds.is_waiting = 0;
4596 mrioc->init_cmds.callback = NULL;
4597 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4598 }
4599
4600 if (mrioc->scan_started)
4601 return 0;
4602
4603 if (mrioc->scan_failed) {
4604 ioc_err(mrioc,
4605 "port enable failed with status=0x%04x\n",
4606 mrioc->scan_failed);
4607 } else
4608 ioc_info(mrioc, "port enable is successfully completed\n");
4609
4610 mpi3mr_start_watchdog(mrioc);
4611 mrioc->is_driver_loading = 0;
4612 mrioc->stop_bsgs = 0;
4613 return 1;
4614 }
4615
4616 /**
4617 * mpi3mr_sdev_destroy - Slave destroy callback handler
4618 * @sdev: SCSI device reference
4619 *
4620 * Cleanup and free per device(lun) private data.
4621 *
4622 * Return: Nothing.
4623 */
mpi3mr_sdev_destroy(struct scsi_device * sdev)4624 static void mpi3mr_sdev_destroy(struct scsi_device *sdev)
4625 {
4626 struct Scsi_Host *shost;
4627 struct mpi3mr_ioc *mrioc;
4628 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4629 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4630 unsigned long flags;
4631 struct scsi_target *starget;
4632 struct sas_rphy *rphy = NULL;
4633
4634 if (!sdev->hostdata)
4635 return;
4636
4637 starget = scsi_target(sdev);
4638 shost = dev_to_shost(&starget->dev);
4639 mrioc = shost_priv(shost);
4640 scsi_tgt_priv_data = starget->hostdata;
4641
4642 scsi_tgt_priv_data->num_luns--;
4643
4644 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4645 if (starget->channel == mrioc->scsi_device_channel)
4646 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4647 else if (mrioc->sas_transport_enabled && !starget->channel) {
4648 rphy = dev_to_rphy(starget->dev.parent);
4649 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4650 rphy->identify.sas_address, rphy);
4651 }
4652
4653 if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
4654 tgt_dev->starget = NULL;
4655 if (tgt_dev)
4656 mpi3mr_tgtdev_put(tgt_dev);
4657 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4658
4659 kfree(sdev->hostdata);
4660 sdev->hostdata = NULL;
4661 }
4662
4663 /**
4664 * mpi3mr_target_destroy - Target destroy callback handler
4665 * @starget: SCSI target reference
4666 *
4667 * Cleanup and free per target private data.
4668 *
4669 * Return: Nothing.
4670 */
mpi3mr_target_destroy(struct scsi_target * starget)4671 static void mpi3mr_target_destroy(struct scsi_target *starget)
4672 {
4673 struct Scsi_Host *shost;
4674 struct mpi3mr_ioc *mrioc;
4675 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4676 struct mpi3mr_tgt_dev *tgt_dev;
4677 unsigned long flags;
4678
4679 if (!starget->hostdata)
4680 return;
4681
4682 shost = dev_to_shost(&starget->dev);
4683 mrioc = shost_priv(shost);
4684 scsi_tgt_priv_data = starget->hostdata;
4685
4686 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4687 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
4688 if (tgt_dev && (tgt_dev->starget == starget) &&
4689 (tgt_dev->perst_id == starget->id))
4690 tgt_dev->starget = NULL;
4691 if (tgt_dev) {
4692 scsi_tgt_priv_data->tgt_dev = NULL;
4693 scsi_tgt_priv_data->perst_id = 0;
4694 mpi3mr_tgtdev_put(tgt_dev);
4695 mpi3mr_tgtdev_put(tgt_dev);
4696 }
4697 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4698
4699 kfree(starget->hostdata);
4700 starget->hostdata = NULL;
4701 }
4702
4703 /**
4704 * mpi3mr_sdev_configure - Slave configure callback handler
4705 * @sdev: SCSI device reference
4706 * @lim: queue limits
4707 *
4708 * Configure queue depth, max hardware sectors and virt boundary
4709 * as required
4710 *
4711 * Return: 0 always.
4712 */
mpi3mr_sdev_configure(struct scsi_device * sdev,struct queue_limits * lim)4713 static int mpi3mr_sdev_configure(struct scsi_device *sdev,
4714 struct queue_limits *lim)
4715 {
4716 struct scsi_target *starget;
4717 struct Scsi_Host *shost;
4718 struct mpi3mr_ioc *mrioc;
4719 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4720 unsigned long flags;
4721 int retval = 0;
4722 struct sas_rphy *rphy = NULL;
4723
4724 starget = scsi_target(sdev);
4725 shost = dev_to_shost(&starget->dev);
4726 mrioc = shost_priv(shost);
4727
4728 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4729 if (starget->channel == mrioc->scsi_device_channel)
4730 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4731 else if (mrioc->sas_transport_enabled && !starget->channel) {
4732 rphy = dev_to_rphy(starget->dev.parent);
4733 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4734 rphy->identify.sas_address, rphy);
4735 }
4736 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4737 if (!tgt_dev)
4738 return -ENXIO;
4739
4740 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
4741
4742 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
4743 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
4744
4745 mpi3mr_configure_tgt_dev(tgt_dev, lim);
4746 mpi3mr_tgtdev_put(tgt_dev);
4747 return retval;
4748 }
4749
4750 /**
4751 * mpi3mr_sdev_init -Slave alloc callback handler
4752 * @sdev: SCSI device reference
4753 *
4754 * Allocate per device(lun) private data and initialize it.
4755 *
4756 * Return: 0 on success -ENOMEM on memory allocation failure.
4757 */
mpi3mr_sdev_init(struct scsi_device * sdev)4758 static int mpi3mr_sdev_init(struct scsi_device *sdev)
4759 {
4760 struct Scsi_Host *shost;
4761 struct mpi3mr_ioc *mrioc;
4762 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4763 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4764 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
4765 unsigned long flags;
4766 struct scsi_target *starget;
4767 int retval = 0;
4768 struct sas_rphy *rphy = NULL;
4769
4770 starget = scsi_target(sdev);
4771 shost = dev_to_shost(&starget->dev);
4772 mrioc = shost_priv(shost);
4773 scsi_tgt_priv_data = starget->hostdata;
4774
4775 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4776
4777 if (starget->channel == mrioc->scsi_device_channel)
4778 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4779 else if (mrioc->sas_transport_enabled && !starget->channel) {
4780 rphy = dev_to_rphy(starget->dev.parent);
4781 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4782 rphy->identify.sas_address, rphy);
4783 }
4784
4785 if (tgt_dev) {
4786 if (tgt_dev->starget == NULL)
4787 tgt_dev->starget = starget;
4788 mpi3mr_tgtdev_put(tgt_dev);
4789 retval = 0;
4790 } else {
4791 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4792 return -ENXIO;
4793 }
4794
4795 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4796
4797 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
4798 if (!scsi_dev_priv_data)
4799 return -ENOMEM;
4800
4801 scsi_dev_priv_data->lun_id = sdev->lun;
4802 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
4803 sdev->hostdata = scsi_dev_priv_data;
4804
4805 scsi_tgt_priv_data->num_luns++;
4806
4807 return retval;
4808 }
4809
4810 /**
4811 * mpi3mr_target_alloc - Target alloc callback handler
4812 * @starget: SCSI target reference
4813 *
4814 * Allocate per target private data and initialize it.
4815 *
4816 * Return: 0 on success -ENOMEM on memory allocation failure.
4817 */
mpi3mr_target_alloc(struct scsi_target * starget)4818 static int mpi3mr_target_alloc(struct scsi_target *starget)
4819 {
4820 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4821 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4822 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4823 struct mpi3mr_tgt_dev *tgt_dev;
4824 unsigned long flags;
4825 int retval = 0;
4826 struct sas_rphy *rphy = NULL;
4827
4828 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
4829 if (!scsi_tgt_priv_data)
4830 return -ENOMEM;
4831
4832 starget->hostdata = scsi_tgt_priv_data;
4833
4834 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4835 if (starget->channel == mrioc->scsi_device_channel) {
4836 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4837 if (tgt_dev && !tgt_dev->is_hidden) {
4838 scsi_tgt_priv_data->starget = starget;
4839 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4840 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4841 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4842 scsi_tgt_priv_data->tgt_dev = tgt_dev;
4843 tgt_dev->starget = starget;
4844 atomic_set(&scsi_tgt_priv_data->block_io, 0);
4845 retval = 0;
4846 if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
4847 ((tgt_dev->dev_spec.pcie_inf.dev_info &
4848 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
4849 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
4850 ((tgt_dev->dev_spec.pcie_inf.dev_info &
4851 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
4852 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
4853 scsi_tgt_priv_data->dev_nvme_dif = 1;
4854 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4855 scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4856 if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
4857 scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
4858 } else
4859 retval = -ENXIO;
4860 } else if (mrioc->sas_transport_enabled && !starget->channel) {
4861 rphy = dev_to_rphy(starget->dev.parent);
4862 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4863 rphy->identify.sas_address, rphy);
4864 if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
4865 (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
4866 scsi_tgt_priv_data->starget = starget;
4867 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4868 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4869 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4870 scsi_tgt_priv_data->tgt_dev = tgt_dev;
4871 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4872 scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4873 tgt_dev->starget = starget;
4874 atomic_set(&scsi_tgt_priv_data->block_io, 0);
4875 retval = 0;
4876 } else
4877 retval = -ENXIO;
4878 }
4879 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4880
4881 return retval;
4882 }
4883
4884 /**
4885 * mpi3mr_check_return_unmap - Whether an unmap is allowed
4886 * @mrioc: Adapter instance reference
4887 * @scmd: SCSI Command reference
4888 *
4889 * The controller hardware cannot handle certain unmap commands
4890 * for NVMe drives, this routine checks those and return true
4891 * and completes the SCSI command with proper status and sense
4892 * data.
4893 *
4894 * Return: TRUE for not allowed unmap, FALSE otherwise.
4895 */
mpi3mr_check_return_unmap(struct mpi3mr_ioc * mrioc,struct scsi_cmnd * scmd)4896 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
4897 struct scsi_cmnd *scmd)
4898 {
4899 unsigned char *buf;
4900 u16 param_len, desc_len, trunc_param_len;
4901
4902 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
4903
4904 if (mrioc->pdev->revision) {
4905 if ((param_len > 24) && ((param_len - 8) & 0xF)) {
4906 trunc_param_len -= (param_len - 8) & 0xF;
4907 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
4908 dprint_scsi_err(mrioc,
4909 "truncating param_len from (%d) to (%d)\n",
4910 param_len, trunc_param_len);
4911 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
4912 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
4913 }
4914 return false;
4915 }
4916
4917 if (!param_len) {
4918 ioc_warn(mrioc,
4919 "%s: cdb received with zero parameter length\n",
4920 __func__);
4921 scsi_print_command(scmd);
4922 scmd->result = DID_OK << 16;
4923 scsi_done(scmd);
4924 return true;
4925 }
4926
4927 if (param_len < 24) {
4928 ioc_warn(mrioc,
4929 "%s: cdb received with invalid param_len: %d\n",
4930 __func__, param_len);
4931 scsi_print_command(scmd);
4932 scmd->result = SAM_STAT_CHECK_CONDITION;
4933 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4934 0x1A, 0);
4935 scsi_done(scmd);
4936 return true;
4937 }
4938 if (param_len != scsi_bufflen(scmd)) {
4939 ioc_warn(mrioc,
4940 "%s: cdb received with param_len: %d bufflen: %d\n",
4941 __func__, param_len, scsi_bufflen(scmd));
4942 scsi_print_command(scmd);
4943 scmd->result = SAM_STAT_CHECK_CONDITION;
4944 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4945 0x1A, 0);
4946 scsi_done(scmd);
4947 return true;
4948 }
4949 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
4950 if (!buf) {
4951 scsi_print_command(scmd);
4952 scmd->result = SAM_STAT_CHECK_CONDITION;
4953 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4954 0x55, 0x03);
4955 scsi_done(scmd);
4956 return true;
4957 }
4958 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
4959 desc_len = get_unaligned_be16(&buf[2]);
4960
4961 if (desc_len < 16) {
4962 ioc_warn(mrioc,
4963 "%s: Invalid descriptor length in param list: %d\n",
4964 __func__, desc_len);
4965 scsi_print_command(scmd);
4966 scmd->result = SAM_STAT_CHECK_CONDITION;
4967 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4968 0x26, 0);
4969 scsi_done(scmd);
4970 kfree(buf);
4971 return true;
4972 }
4973
4974 if (param_len > (desc_len + 8)) {
4975 trunc_param_len = desc_len + 8;
4976 scsi_print_command(scmd);
4977 dprint_scsi_err(mrioc,
4978 "truncating param_len(%d) to desc_len+8(%d)\n",
4979 param_len, trunc_param_len);
4980 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
4981 scsi_print_command(scmd);
4982 }
4983
4984 kfree(buf);
4985 return false;
4986 }
4987
4988 /**
4989 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
4990 * @scmd: SCSI Command reference
4991 *
4992 * Checks whether a cdb is allowed during shutdown or not.
4993 *
4994 * Return: TRUE for allowed commands, FALSE otherwise.
4995 */
4996
mpi3mr_allow_scmd_to_fw(struct scsi_cmnd * scmd)4997 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
4998 {
4999 switch (scmd->cmnd[0]) {
5000 case SYNCHRONIZE_CACHE:
5001 case START_STOP:
5002 return true;
5003 default:
5004 return false;
5005 }
5006 }
5007
5008 /**
5009 * mpi3mr_qcmd - I/O request despatcher
5010 * @shost: SCSI Host reference
5011 * @scmd: SCSI Command reference
5012 *
5013 * Issues the SCSI Command as an MPI3 request.
5014 *
5015 * Return: 0 on successful queueing of the request or if the
5016 * request is completed with failure.
5017 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
5018 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
5019 */
static int mpi3mr_qcmd(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct scmd_priv *scmd_priv_data = NULL;
	struct mpi3_scsi_io_request *scsiio_req = NULL;
	struct op_req_qinfo *op_req_q = NULL;
	int retval = 0;
	u16 dev_handle;
	u16 host_tag;
	u32 scsiio_flags = 0, data_len_blks = 0;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int iprio_class;
	u8 is_pcie_dev = 0;
	u32 tracked_io_sz = 0;
	u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
	struct mpi3mr_throttle_group_info *tg = NULL;

	if (mrioc->unrecoverable) {
		scmd->result = DID_ERROR << 16;
		scsi_done(scmd);
		goto out;
	}

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	if (mrioc->stop_drv_processing &&
	    !(mpi3mr_allow_scmd_to_fw(scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;

	/* Avoid error handling escalation when device is removed or blocked */
	if (scmd->device->host->shost_state == SHOST_RECOVERY &&
	    scmd->cmnd[0] == TEST_UNIT_READY &&
	    (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) {
		scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
		scsi_done(scmd);
		goto out;
	}

	if (mrioc->reset_in_progress || mrioc->prepare_for_reset
	    || mrioc->block_on_pci_err) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	if (atomic_read(&stgt_priv_data->block_io)) {
		if (mrioc->stop_drv_processing) {
			scmd->result = DID_NO_CONNECT << 16;
			scsi_done(scmd);
			goto out;
		}
		retval = SCSI_MLQUEUE_DEVICE_BUSY;
		goto out;
	}

	if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}
	if (stgt_priv_data->dev_removed) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

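	/*
	 * UNMAP commands to NVMe (PCIe) devices behind a SAS4116 need
	 * their parameter list validated and possibly truncated first;
	 * mpi3mr_check_return_unmap() returns true when it has already
	 * completed the command, in which case it must not be issued.
	 */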
	if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
		is_pcie_dev = 1;
	if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
	    (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    mpi3mr_check_return_unmap(mrioc, scmd))
		goto out;

	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
	if (host_tag == MPI3MR_HOSTTAG_INVALID) {
		scmd->result = DID_ERROR << 16;
		scsi_done(scmd);
		goto out;
	}

	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
	else
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;

	scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;

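	/*
	 * NCQ priority: when enabled for the device, requests in the block
	 * layer's real-time I/O priority class are marked high priority in
	 * the MPI3 command-priority field so the drive services them first.
	 */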
	if (sdev_priv_data->ncq_prio_enable) {
		iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (iprio_class == IOPRIO_CLASS_RT)
			scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
	}

	if (scmd->cmd_len > 16)
		scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	scmd_priv_data = scsi_cmd_priv(scmd);
	memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
	scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
	scsiio_req->host_tag = cpu_to_le16(host_tag);

	mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);

	if (stgt_priv_data->wslen)
		mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags,
		    stgt_priv_data->wslen);

	memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
	scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
	scsiio_req->dev_handle = cpu_to_le16(dev_handle);
	scsiio_req->flags = cpu_to_le32(scsiio_flags);
	int_to_scsilun(sdev_priv_data->lun_id,
	    (struct scsi_lun *)scsiio_req->lun);

	if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
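	/*
	 * Large-I/O throttling: the transfer length is tracked in 512-byte
	 * blocks. I/Os at or above the controller's threshold are charged
	 * against the per-IOC and, where present, per-throttle-group
	 * pending counters. Crossing a high watermark turns on diversion
	 * of I/O to the firmware; for throttle groups this also queues a
	 * queue-depth reduction event.
	 */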
	data_len_blks = scsi_bufflen(scmd) >> 9;
	if ((data_len_blks >= mrioc->io_throttle_data_length) &&
	    stgt_priv_data->io_throttle_enabled) {
		tracked_io_sz = data_len_blks;
		tg = stgt_priv_data->throttle_group;
		if (tg) {
			ioc_pend_data_len = atomic_add_return(data_len_blks,
			    &mrioc->pend_large_data_sz);
			tg_pend_data_len = atomic_add_return(data_len_blks,
			    &tg->pend_large_data_sz);
			if (!tg->io_divert && ((ioc_pend_data_len >=
			    mrioc->io_throttle_high) ||
			    (tg_pend_data_len >= tg->high))) {
				tg->io_divert = 1;
				tg->need_qd_reduction = 1;
				mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
				    tg, 1);
				mpi3mr_queue_qd_reduction_event(mrioc, tg);
			}
		} else {
			ioc_pend_data_len = atomic_add_return(data_len_blks,
			    &mrioc->pend_large_data_sz);
			if (ioc_pend_data_len >= mrioc->io_throttle_high)
				stgt_priv_data->io_divert = 1;
		}
	}

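	/*
	 * When diversion is active, tag the request so the firmware handles
	 * it and record I/O throttling as the divert reason.
	 */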
	if (stgt_priv_data->io_divert) {
		scsiio_req->msg_flags |=
		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
		scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
	}
	scsiio_req->flags |= cpu_to_le32(scsiio_flags);

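	/*
	 * Post the request to the operational queue. On failure, undo the
	 * throttle accounting added above and report the host as busy so
	 * the midlayer requeues the command.
	 */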
	if (mpi3mr_op_request_post(mrioc, op_req_q,
	    scmd_priv_data->mpi3mr_scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		if (tracked_io_sz) {
			atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
			if (tg)
				atomic_sub(tracked_io_sz,
				    &tg->pend_large_data_sz);
		}
		goto out;
	}

out:
	return retval;
}

static const struct scsi_host_template mpi3mr_driver_template = {
	.module = THIS_MODULE,
	.name = "MPI3 Storage Controller",
	.proc_name = MPI3MR_DRIVER_NAME,
	.queuecommand = mpi3mr_qcmd,
	.target_alloc = mpi3mr_target_alloc,
	.sdev_init = mpi3mr_sdev_init,
	.sdev_configure = mpi3mr_sdev_configure,
	.target_destroy = mpi3mr_target_destroy,
	.sdev_destroy = mpi3mr_sdev_destroy,
	.scan_finished = mpi3mr_scan_finished,
	.scan_start = mpi3mr_scan_start,
	.change_queue_depth = mpi3mr_change_queue_depth,
	.eh_abort_handler = mpi3mr_eh_abort,
	.eh_device_reset_handler = mpi3mr_eh_dev_reset,
	.eh_target_reset_handler = mpi3mr_eh_target_reset,
	.eh_bus_reset_handler = mpi3mr_eh_bus_reset,
	.eh_host_reset_handler = mpi3mr_eh_host_reset,
	.bios_param = mpi3mr_bios_param,
	.map_queues = mpi3mr_map_queues,
	.mq_poll = mpi3mr_blk_mq_poll,
	.no_write_same = 1,
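	/*
	 * can_queue and sg_tablesize are conservative placeholders here;
	 * mpi3mr_probe() raises them to the controller-reported limits
	 * (max_host_ios and max_sgl_entries) before scsi_add_host().
	 */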
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES,
	/* max xfer supported is 1M (2K in 512 byte sized sectors) */
	.max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512),
	.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
	.max_segment_size = 0xffffffff,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct scmd_priv),
	.shost_groups = mpi3mr_host_groups,
	.sdev_groups = mpi3mr_dev_groups,
};

/**
 * mpi3mr_init_drv_cmd - Initialize internal command tracker
 * @cmdptr: Internal command tracker
 * @host_tag: Host tag used for the specific command
 *
 * Initialize the internal command tracker structure with the
 * specified host tag.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
	u16 host_tag)
{
	mutex_init(&cmdptr->mutex);
	cmdptr->reply = NULL;
	cmdptr->state = MPI3MR_CMD_NOTUSED;
	cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	cmdptr->host_tag = host_tag;
}

/**
 * osintfc_mrioc_security_status - Check controller secure status
 * @pdev: PCI device instance
 *
 * Read the Device Serial Number capability from PCI config
 * space and decide whether the controller is secure or not.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int
osintfc_mrioc_security_status(struct pci_dev *pdev)
{
	u32 cap_data;
	int base;
	u32 ctlr_status;
	u32 debug_status;
	int retval = 0;

	base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!base) {
		dev_err(&pdev->dev,
		    "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
		return -1;
	}

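	/*
	 * On these controllers the first dword of the DSN capability
	 * payload appears to carry security and secure-debug status bits;
	 * the masks below extract the two fields.
	 */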
	pci_read_config_dword(pdev, base + 4, &cap_data);

	debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
	ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;

	switch (ctlr_status) {
	case MPI3MR_INVALID_DEVICE:
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
		break;
	case MPI3MR_CONFIG_SECURE_DEVICE:
		if (!debug_status)
			dev_info(&pdev->dev,
			    "%s: Config secure ctlr is detected\n",
			    __func__);
		break;
	case MPI3MR_HARD_SECURE_DEVICE:
		break;
	case MPI3MR_TAMPERED_DEVICE:
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	if (!retval && debug_status) {
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
	}

	return retval;
}

/**
 * mpi3mr_probe - PCI probe callback
 * @pdev: PCI device instance
 * @id: PCI device ID details
 *
 * Controller initialization routine. Checks the security status
 * of the controller; if it is invalid or tampered, returns from
 * the probe without initializing the controller. Otherwise,
 * allocates the per-adapter instance through shost_priv(),
 * initializes controller-specific data structures, initializes
 * the controller hardware, and adds the shost to the SCSI
 * subsystem.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int
mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mpi3mr_ioc *mrioc = NULL;
	struct Scsi_Host *shost = NULL;
	int retval = 0, i;

	if (osintfc_mrioc_security_status(pdev)) {
		warn_non_secure_ctlr = 1;
		return 1; /* For Invalid and Tampered device */
	}

	shost = scsi_host_alloc(&mpi3mr_driver_template,
	    sizeof(struct mpi3mr_ioc));
	if (!shost) {
		retval = -ENODEV;
		goto shost_failed;
	}

	mrioc = shost_priv(shost);
	retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL);
	if (retval < 0)
		goto id_alloc_failed;
	mrioc->id = (u8)retval;
	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
	INIT_LIST_HEAD(&mrioc->list);
	spin_lock(&mrioc_list_lock);
	list_add_tail(&mrioc->list, &mrioc_list);
	spin_unlock(&mrioc_list_lock);

	spin_lock_init(&mrioc->admin_req_lock);
	spin_lock_init(&mrioc->reply_free_queue_lock);
	spin_lock_init(&mrioc->sbq_lock);
	spin_lock_init(&mrioc->fwevt_lock);
	spin_lock_init(&mrioc->tgtdev_lock);
	spin_lock_init(&mrioc->watchdog_lock);
	spin_lock_init(&mrioc->chain_buf_lock);
	spin_lock_init(&mrioc->adm_req_q_bar_writeq_lock);
	spin_lock_init(&mrioc->adm_reply_q_bar_writeq_lock);
	spin_lock_init(&mrioc->sas_node_lock);
	spin_lock_init(&mrioc->trigger_lock);

	INIT_LIST_HEAD(&mrioc->fwevt_list);
	INIT_LIST_HEAD(&mrioc->tgtdev_list);
	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
	INIT_LIST_HEAD(&mrioc->sas_expander_list);
	INIT_LIST_HEAD(&mrioc->hba_port_table_list);
	INIT_LIST_HEAD(&mrioc->enclosure_list);

	mutex_init(&mrioc->reset_mutex);
	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
	mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
	mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
	mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
	    MPI3MR_HOSTTAG_TRANSPORT_CMDS);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
		    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);

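	/*
	 * Segmented operational queues are not used on SAS4116 controllers
	 * with PCI revision 0; all other parts have them enabled.
	 */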
	if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    !pdev->revision)
		mrioc->enable_segqueue = false;
	else
		mrioc->enable_segqueue = true;

	init_waitqueue_head(&mrioc->reset_waitq);
	mrioc->logging_level = logging_level;
	mrioc->shost = shost;
	mrioc->pdev = pdev;
	mrioc->stop_bsgs = 1;

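	/*
	 * Clamp the max_sgl_entries module parameter between the default
	 * and the supported maximum, rounding intermediate values down to
	 * a multiple of MPI3MR_DEFAULT_SGL_ENTRIES.
	 */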
	mrioc->max_sgl_entries = max_sgl_entries;
	if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
		mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
	else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
		mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
	else {
		mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
		mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
	}

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;

	shost->max_channel = 0;
	shost->max_id = 0xFFFFFFFF;

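	/*
	 * host_tagset makes the block layer share a single tag space across
	 * all hardware queues, matching the controller-wide host tag range
	 * this driver allocates from.
	 */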
	shost->host_tagset = 1;

	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}

	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    "%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto fwevtthread_failed;
	}

	mrioc->is_driver_loading = 1;
	mrioc->cpu_count = num_online_cpus();
	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		retval = -ENODEV;
		goto resource_alloc_failed;
	}
	if (mpi3mr_init_ioc(mrioc)) {
		ioc_err(mrioc, "initializing IOC failed\n");
		retval = -ENODEV;
		goto init_ioc_failed;
	}

	shost->nr_hw_queues = mrioc->num_op_reply_q;
	if (mrioc->active_poll_qcount)
		shost->nr_maps = 3;

	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = mrioc->max_sgl_entries;
	shost->max_id = mrioc->facts.max_perids + 1;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	mpi3mr_bsg_init(mrioc);
	return retval;

addhost_failed:
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
init_ioc_failed:
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
resource_alloc_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
id_alloc_failed:
	scsi_host_put(shost);
shost_failed:
	return retval;
}


/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Cleanup the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, and unregister the shost.
 *
 * Return: Nothing.
 */
static void mpi3mr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_hba_port *port, *hba_port_next;
	struct mpi3mr_sas_node *sas_expander, *sas_expander_next;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	if (mrioc->block_on_pci_err) {
		mrioc->block_on_pci_err = false;
		scsi_unblock_requests(shost);
		mrioc->unrecoverable = 1;
	}

	if (!pci_device_is_present(mrioc->pdev) ||
	    mrioc->pci_err_recovery) {
		mrioc->unrecoverable = 1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}

	mpi3mr_bsg_exit(mrioc);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	if (mrioc->sas_transport_enabled)
		sas_remove_host(shost);
	else
		scsi_remove_host(shost);

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
		mpi3mr_tgtdev_put(tgtdev);
	}
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);

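	/*
	 * Tear down the SAS topology. The expander list is walked in
	 * reverse so children go before their parents, and sas_node_lock
	 * is dropped around mpi3mr_expander_node_remove(), which cannot
	 * run with the lock held, then retaken to continue the walk.
	 */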
	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
	    &mrioc->sas_expander_list, list) {
		spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
		mpi3mr_expander_node_remove(mrioc, sas_expander);
		spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	}
	list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
		ioc_info(mrioc,
		    "removing hba_port entry: %p port: %d from hba_port list\n",
		    port, port->port_id);
		list_del(&port->list);
		kfree(port);
	}
	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);

	if (mrioc->sas_hba.num_phys) {
		kfree(mrioc->sas_hba.phy);
		mrioc->sas_hba.phy = NULL;
		mrioc->sas_hba.num_phys = 0;
	}

	ida_free(&mrioc_ida, mrioc->id);
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);

	scsi_host_put(shost);
}


/**
 * mpi3mr_shutdown - PCI shutdown callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller.
 *
 * Return: Nothing.
 */
static void mpi3mr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_cleanup_resources(mrioc);
}


/**
 * mpi3mr_suspend - PCI power management suspend callback
 * @dev: Device struct
 *
 * Change the power state to the given value and clean up the
 * IOC by issuing MUR and shutdown notification.
 *
 * Return: 0 always.
 */
static int __maybe_unused
mpi3mr_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	scsi_block_requests(shost);
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
	    pdev, pci_name(pdev));
	mpi3mr_cleanup_resources(mrioc);

	return 0;
}


/**
 * mpi3mr_resume - PCI power management resume callback
 * @dev: Device struct
 *
 * Restore the power state to D0, reinitialize the controller,
 * and resume I/O operations to the target devices.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int __maybe_unused
mpi3mr_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	mrioc->pdev = pdev;
	mrioc->cpu_count = num_online_cpus();
	r = mpi3mr_setup_resources(mrioc);
	if (r) {
		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
		    __func__, r);
		return r;
	}

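	/*
	 * Re-enable driver processing, drop stale device handles and cached
	 * enclosure data, and reinitialize the IOC; requests stay blocked
	 * until after the topology settle delay so rediscovery can finish.
	 */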
	mrioc->stop_drv_processing = 0;
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);
	mpi3mr_memset_buffers(mrioc);
	r = mpi3mr_reinit_ioc(mrioc, 1);
	if (r) {
		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
		return r;
	}
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
	scsi_unblock_requests(shost);
	mrioc->device_refresh_on = 0;
	mpi3mr_start_watchdog(mrioc);

	return 0;
}


/**
 * mpi3mr_pcierr_error_detected - PCI error detected callback
 * @pdev: PCI device instance
 * @state: channel state
 *
 * This function is called by the PCI error recovery driver and,
 * based on the state passed in, decides what action to
 * recommend back to the PCI driver.
 *
 * For all of the states, if there is no valid mrioc or scsi host
 * reference in the PCI device, this function returns a
 * disconnect result.
 *
 * For the normal state, this function returns a can-recover
 * result.
 *
 * For the frozen state, this function blocks until any pending
 * controller initialization or re-initialization completes,
 * stops any new interactions with the controller, and returns
 * a reset-required result.
 *
 * For the permanent failure state, this function marks the
 * controller as unrecoverable and returns a disconnect result.
 *
 * Returns: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or
 * DISCONNECT based on the controller state.
 */
static pci_ers_result_t
mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	unsigned int timeout = MPI3MR_RESET_TIMEOUT;

	dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__,
	    state);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
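		/*
		 * Block further activity, then wait up to
		 * MPI3MR_RESET_TIMEOUT seconds for an in-flight reset or
		 * driver load to finish before deciding the outcome.
		 */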
		mrioc->pci_err_recovery = true;
		mrioc->block_on_pci_err = true;
		do {
			if (mrioc->reset_in_progress || mrioc->is_driver_loading)
				ssleep(1);
			else
				break;
		} while (--timeout);

		if (!timeout) {
			mrioc->pci_err_recovery = true;
			mrioc->block_on_pci_err = true;
			mrioc->unrecoverable = 1;
			mpi3mr_stop_watchdog(mrioc);
			mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
			return PCI_ERS_RESULT_DISCONNECT;
		}

		scsi_block_requests(mrioc->shost);
		mpi3mr_stop_watchdog(mrioc);
		mpi3mr_cleanup_resources(mrioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		mrioc->pci_err_recovery = true;
		mrioc->block_on_pci_err = true;
		mrioc->unrecoverable = 1;
		mpi3mr_stop_watchdog(mrioc);
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		return PCI_ERS_RESULT_DISCONNECT;
	}
}


/**
 * mpi3mr_pcierr_slot_reset - Post slot reset callback
 * @pdev: PCI device instance
 *
 * This function is called by the PCI error recovery driver
 * after a slot or link reset issued by it for the recovery; the
 * driver is expected to bring back the controller and
 * initialize it.
 *
 * This function restores PCI state and reinitializes controller
 * resources and the controller. It blocks for any pending
 * reset to complete.
 *
 * Returns: PCI_ERS_RESULT_DISCONNECT on failure or
 * PCI_ERS_RESULT_RECOVERED
 */
static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	unsigned int timeout = MPI3MR_RESET_TIMEOUT;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	do {
		if (mrioc->reset_in_progress)
			ssleep(1);
		else
			break;
	} while (--timeout);

	if (!timeout)
		goto out_failed;

	pci_restore_state(pdev);

	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		goto out_failed;
	}
	mrioc->unrecoverable = 0;
	mrioc->pci_err_recovery = false;

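	/*
	 * With PCI state restored and resources re-mapped, a soft reset
	 * brings the controller back to an operational state.
	 */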
	if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0))
		goto out_failed;

	return PCI_ERS_RESULT_RECOVERED;

out_failed:
	mrioc->unrecoverable = 1;
	mrioc->block_on_pci_err = false;
	scsi_unblock_requests(shost);
	mpi3mr_start_watchdog(mrioc);
	return PCI_ERS_RESULT_DISCONNECT;
}


/**
 * mpi3mr_pcierr_resume - PCI error recovery resume callback
 * @pdev: PCI device instance
 *
 * This function re-enables all I/O and IOCTLs after the reset
 * issued as part of the PCI error recovery.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcierr_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	if (mrioc->block_on_pci_err) {
		mrioc->block_on_pci_err = false;
		scsi_unblock_requests(shost);
		mpi3mr_start_watchdog(mrioc);
	}
}

/**
 * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback
 * @pdev: PCI device instance
 *
 * This is called only if mpi3mr_pcierr_error_detected returns
 * PCI_ERS_RESULT_CAN_RECOVER.
 *
 * Return: PCI_ERS_RESULT_DISCONNECT when the controller is
 * unrecoverable or when the shost/mrioc reference cannot be
 * found, else return PCI_ERS_RESULT_RECOVERED
 */
static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;

	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);

	shost = pci_get_drvdata(pdev);
	mrioc = shost_priv(shost);

	if (mrioc->unrecoverable)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}


static const struct pci_device_id mpi3mr_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
	},
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID)
	},
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);

static const struct pci_error_handlers mpi3mr_err_handler = {
	.error_detected = mpi3mr_pcierr_error_detected,
	.mmio_enabled = mpi3mr_pcierr_mmio_enabled,
	.slot_reset = mpi3mr_pcierr_slot_reset,
	.resume = mpi3mr_pcierr_resume,
};

static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);

static struct pci_driver mpi3mr_pci_driver = {
	.name = MPI3MR_DRIVER_NAME,
	.id_table = mpi3mr_pci_id_table,
	.probe = mpi3mr_probe,
	.remove = mpi3mr_remove,
	.shutdown = mpi3mr_shutdown,
	.err_handler = &mpi3mr_err_handler,
	.driver.pm = &mpi3mr_pm_ops,
};

static ssize_t event_counter_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
}
static DRIVER_ATTR_RO(event_counter);

static int __init mpi3mr_init(void)
{
	int ret_val;

	pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
	    MPI3MR_DRIVER_VERSION);

	mpi3mr_transport_template =
	    sas_attach_transport(&mpi3mr_transport_functions);
	if (!mpi3mr_transport_template) {
		pr_err("%s failed to load due to sas transport attach failure\n",
		    MPI3MR_DRIVER_NAME);
		return -ENODEV;
	}

	ret_val = pci_register_driver(&mpi3mr_pci_driver);
	if (ret_val) {
		pr_err("%s failed to load due to pci register driver failure\n",
		    MPI3MR_DRIVER_NAME);
		goto err_pci_reg_fail;
	}

	ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	if (ret_val)
		goto err_event_counter;

	return ret_val;

err_event_counter:
	pci_unregister_driver(&mpi3mr_pci_driver);

err_pci_reg_fail:
	sas_release_transport(mpi3mr_transport_template);
	return ret_val;
}

static void __exit mpi3mr_exit(void)
{
	if (warn_non_secure_ctlr)
		pr_warn(
		    "Unloading %s version %s while managing a non secure controller\n",
		    MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
	else
		pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
		    MPI3MR_DRIVER_VERSION);

	driver_remove_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	pci_unregister_driver(&mpi3mr_pci_driver);
	sas_release_transport(mpi3mr_transport_template);
	ida_destroy(&mrioc_ida);
}

module_init(mpi3mr_init);
module_exit(mpi3mr_exit);