// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled, and target mode can be "
	"activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql_dm_tgt_ex_pct;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds FW will allocate resources "
	"for Target mode.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
	"Allows the user to control IRQ placement via smp_affinity. "
	"Valid with qlini_mode=disabled. "
	"1 (default): enable");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */
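
/*
 * A minimal sketch (not used by this driver) of how the masks above pick
 * apart the FCP_CMND task attribute/priority byte; "ta_byte" is a
 * hypothetical variable name used only for illustration:
 *
 *	u8 task_attr = ta_byte & FCP_PTA_MASK;
 *	u8 priority = (ta_byte & ~FCP_PRI_RESVD_MASK) >> FCP_PRI_SHIFT;
 *
 *	if (task_attr == FCP_PTA_HEADQ)
 *		;	/- head-of-queue handling would go here -/
 */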

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
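
/*
 * Typical caller pattern, mirroring qlt_create_sess() later in this file
 * (shown only as an illustration of the API above):
 *
 *	qlt_do_generation_tick(vha, &sess->generation);
 */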

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
	    "%s: qla_target(%d): type %x ox_id %04x\n",
	    __func__, vha->vp_idx, atio->u.raw.entry_type,
	    be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);

		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
		    (struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
		    "qla_target(%d):%s: CRC2 Response pkt\n",
		    vha->vp_idx, __func__);
		fallthrough;
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}
}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_COMPLETE);
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
	sp->done = qla2x00_async_nack_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC\n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return, the content of iocb is undefined.
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
    struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;

	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
	    "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
	    " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
	    sess, link, sess->port_name,
	    iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
	    iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
	    pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the
	 * initiator to ack LOGO. Initialize to 1 if LOGO is
	 * triggered by a command, otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;
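
/*
 * Typical stack usage, mirroring qlt_free_session_done() below:
 *
 *	qlt_port_logo_t logo;
 *
 *	logo.id = sess->d_id;
 *	logo.cmd_count = 0;
 *	qlt_send_first_logo(vha, &logo);
 */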

static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qlt_plogi_ack_t *own =
	    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_disc, vha, 0xf084,
	    "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
	    " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
	    __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
	    sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
	    sess->logout_on_delete, sess->keep_nport_handle,
	    sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own &&
			     (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
			    ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_disc, vha, 0xf086,
				    "%s: waiting for sess %p logout\n",
				    __func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			if (cnt > 200)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
		    (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	sess->flags &= ~FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	sess->deleted = QLA_SESS_DELETED;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
			    "se_sess %p / sess %p port %8phC is gone,"
			    " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" : "no own PLOGI pending",
			    own ? own->ref_count : -1,
			    iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	sess->explicit_logout = 0;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	sess->free_pending = 0;

	qla2x00_dfs_remove_rport(vha, sess);

	ql_dbg(ql_dbg_disc, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	/*
	 * Use FCF_ASYNC_SENT flag to block other cmds used in sess
	 * management from being sent.
	 */
	sess->flags |= FCF_ASYNC_SENT;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	}
}

void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
		    !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
			if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
				wake_up_all(&tgt->waitQ);

			if (sess->vha->fcport_count == 0)
				wake_up_all(&sess->vha->fcport_waitQ);
			return;
		}
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to the upper layer.
		 * Let it finish.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration)/1000;
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration(%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->prli_pend_timer = 0;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC\n",
	    sess, sess->port_name);

	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list, *gid;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	gid = gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		if (gid->al_pa == s_id.al_pa &&
		    gid->area == s_id.area &&
		    gid->domain == s_id.domain) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		gid = (void *)gid + ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the
 * list. Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * max_gen - specifies maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;

	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up().
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}
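
/*
 * Intended use, as in qlt_stop_phase1() below: poll the session count
 * (bounded in time) until all sessions are gone, e.g.
 *
 *	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
 */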

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}

static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint32_t h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = make_handle(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}
1811 /*
1812 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1813 */
1814 static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
1815 struct abts_recv_from_24xx *abts, uint32_t status,
1816 bool ids_reversed)
1817 {
1818 struct scsi_qla_host *vha = qpair->vha;
1819 struct qla_hw_data *ha = vha->hw;
1820 struct abts_resp_to_24xx *resp;
1821 __le32 f_ctl;
1822 uint8_t *p;
1823
1824 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1825 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1826 ha, abts, status);
1827
1828 resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
1829 NULL);
1830 if (!resp) {
1831 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1832 "qla_target(%d): %s failed: unable to allocate "
1833 "request packet", vha->vp_idx, __func__);
1834 return;
1835 }
1836
1837 resp->entry_type = ABTS_RESP_24XX;
1838 resp->handle = QLA_TGT_SKIP_HANDLE;
1839 resp->entry_count = 1;
1840 resp->nport_handle = abts->nport_handle;
1841 resp->vp_index = vha->vp_idx;
1842 resp->sof_type = abts->sof_type;
1843 resp->exchange_address = abts->exchange_address;
1844 resp->fcp_hdr_le = abts->fcp_hdr_le;
1845 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1846 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1847 F_CTL_SEQ_INITIATIVE);
1848 p = (uint8_t *)&f_ctl;
1849 resp->fcp_hdr_le.f_ctl[0] = *p++;
1850 resp->fcp_hdr_le.f_ctl[1] = *p++;
1851 resp->fcp_hdr_le.f_ctl[2] = *p;
1852 if (ids_reversed) {
1853 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
1854 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
1855 } else {
1856 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1857 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1858 }
1859 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1860 if (status == FCP_TMF_CMPL) {
1861 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1862 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1863 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1864 resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
1865 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1866 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1867 } else {
1868 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1869 resp->payload.ba_rjt.reason_code =
1870 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1871 /* Other bytes are zero */
1872 }
1873
1874 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1875
1876 /* Memory Barrier */
1877 wmb();
1878 if (qpair->reqq_start_iocbs)
1879 qpair->reqq_start_iocbs(qpair);
1880 else
1881 qla2x00_start_iocbs(vha, qpair->req);
1882 }
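/*
 * Usage note on ids_reversed (grounded in the call sites in this file):
 * when acknowledging an ABTS received from an initiator, the received
 * header's s_id/d_id must be swapped (ids_reversed = false); when
 * responding against our own previously built ABTS response, whose
 * header already carries reversed IDs, they are copied through
 * unchanged, e.g.:
 *
 *	qlt_24xx_send_abts_resp(qpair, abts, FCP_TMF_REJECTED, false);
 *	qlt_24xx_send_abts_resp(qpair, entry, FCP_TMF_CMPL, true);
 */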
1883
1884 /*
1885 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1886 */
1887 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1888 struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
1889 {
1890 struct ctio7_to_24xx *ctio;
1891 u16 tmp;
1892 struct abts_recv_from_24xx *entry;
1893
1894 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
1895 if (ctio == NULL) {
1896 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1897 "qla_target(%d): %s failed: unable to allocate "
1898 "request packet\n", vha->vp_idx, __func__);
1899 return;
1900 }
1901
1902 if (mcmd)
1903 /* abts from remote port */
1904 entry = &mcmd->orig_iocb.abts;
1905 else
1906 /* abts from this driver. */
1907 entry = (struct abts_recv_from_24xx *)pkt;
1908
1909 /*
1910 * On entry we have the firmware's response to an ABTS response that
1911 * we generated earlier, so its ID fields are already reversed.
1912 */
1913
1914 ctio->entry_type = CTIO_TYPE7;
1915 ctio->entry_count = 1;
1916 ctio->nport_handle = entry->nport_handle;
1917 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1918 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1919 ctio->vp_index = vha->vp_idx;
1920 ctio->exchange_addr = entry->exchange_addr_to_abort;
1921 tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1922
1923 if (mcmd) {
1924 ctio->initiator_id = entry->fcp_hdr_le.s_id;
1925
1926 if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
1927 tmp |= (mcmd->abort_io_attr << 9);
1928 else if (qpair->retry_term_cnt & 1)
1929 tmp |= (0x4 << 9);
1930 } else {
1931 ctio->initiator_id = entry->fcp_hdr_le.d_id;
1932
1933 if (qpair->retry_term_cnt & 1)
1934 tmp |= (0x4 << 9);
1935 }
1936 ctio->u.status1.flags = cpu_to_le16(tmp);
1937 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
1938
1939 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1940 "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
1941 le16_to_cpu(ctio->u.status1.flags),
1942 le16_to_cpu(ctio->u.status1.ox_id),
1943 (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
1944
1945 /* Memory Barrier */
1946 wmb();
1947 if (qpair->reqq_start_iocbs)
1948 qpair->reqq_start_iocbs(qpair);
1949 else
1950 qla2x00_start_iocbs(vha, qpair->req);
1951
1952 if (mcmd)
1953 qlt_build_abts_resp_iocb(mcmd);
1954 else
1955 qlt_24xx_send_abts_resp(qpair,
1956 (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
1957
1958 }
1959
1960 /* drop cmds for the given lun
1961 * XXX only looks for cmds on the port through which the lun reset was received
1962 * XXX does not go through the lists of other ports (which may have cmds
1963 * for the same lun)
1964 */
1965 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
1966 {
1967 struct qla_tgt_sess_op *op;
1968 struct qla_tgt_cmd *cmd;
1969 uint32_t key;
1970 unsigned long flags;
1971
1972 key = sid_to_key(s_id);
1973 spin_lock_irqsave(&vha->cmd_list_lock, flags);
1974 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1975 uint32_t op_key;
1976 u64 op_lun;
1977
1978 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1979 op_lun = scsilun_to_int(
1980 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1981 if (op_key == key && op_lun == lun)
1982 op->aborted = true;
1983 }
1984
1985 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
1986 uint32_t op_key;
1987 u64 op_lun;
1988
1989 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1990 op_lun = scsilun_to_int(
1991 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1992 if (op_key == key && op_lun == lun)
1993 op->aborted = true;
1994 }
1995
1996 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1997 uint32_t cmd_key;
1998 u64 cmd_lun;
1999
2000 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
2001 cmd_lun = scsilun_to_int(
2002 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
2003 if (cmd_key == key && cmd_lun == lun)
2004 cmd->aborted = 1;
2005 }
2006 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
2007 }
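/*
 * sid_to_key() condenses the 24-bit FC S_ID into one integer so the
 * three list walks above can compare initiator ports with a single
 * test. A sketch of the idea (the real helper lives in qla_target.h):
 *
 *	key = (s_id.domain << 16) | (s_id.area << 8) | s_id.al_pa;
 */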
2008
2009 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
2010 uint64_t unpacked_lun)
2011 {
2012 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2013 struct qla_qpair_hint *h = NULL;
2014
2015 if (vha->flags.qpairs_available) {
2016 h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
2017 if (!h)
2018 h = &tgt->qphints[0];
2019 } else {
2020 h = &tgt->qphints[0];
2021 }
2022
2023 return h;
2024 }
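/*
 * Sketch of how a caller consumes the hint (mirroring the pattern in
 * __qlt_24xx_handle_abts() below): the hint pins both the queue pair
 * and the CPU that the deferred TMR work should run on.
 *
 *	struct qla_qpair_hint *h = qlt_find_qphint(vha, unpacked_lun);
 *
 *	mcmd->qpair = h->qpair;
 *	mcmd->se_cmd.cpuid = h->cpuid;
 */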
2025
2026 static void qlt_do_tmr_work(struct work_struct *work)
2027 {
2028 struct qla_tgt_mgmt_cmd *mcmd =
2029 container_of(work, struct qla_tgt_mgmt_cmd, work);
2030 struct qla_hw_data *ha = mcmd->vha->hw;
2031 int rc;
2032 uint32_t tag;
2033 unsigned long flags;
2034
2035 switch (mcmd->tmr_func) {
2036 case QLA_TGT_ABTS:
2037 tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
2038 break;
2039 default:
2040 tag = 0;
2041 break;
2042 }
2043
2044 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
2045 mcmd->tmr_func, tag);
2046
2047 if (rc != 0) {
2048 spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
2049 switch (mcmd->tmr_func) {
2050 case QLA_TGT_ABTS:
2051 mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
2052 qlt_build_abts_resp_iocb(mcmd);
2053 break;
2054 case QLA_TGT_LUN_RESET:
2055 case QLA_TGT_CLEAR_TS:
2056 case QLA_TGT_ABORT_TS:
2057 case QLA_TGT_CLEAR_ACA:
2058 case QLA_TGT_TARGET_RESET:
2059 qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
2060 qla_sam_status);
2061 break;
2062
2063 case QLA_TGT_ABORT_ALL:
2064 case QLA_TGT_NEXUS_LOSS_SESS:
2065 case QLA_TGT_NEXUS_LOSS:
2066 qlt_send_notify_ack(mcmd->qpair,
2067 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2068 break;
2069 }
2070 spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);
2071
2072 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
2073 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
2074 mcmd->vha->vp_idx, rc);
2075 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2076 }
2077 }
2078
2079 /* ha->hardware_lock supposed to be held on entry */
2080 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2081 struct abts_recv_from_24xx *abts, struct fc_port *sess)
2082 {
2083 struct qla_hw_data *ha = vha->hw;
2084 struct qla_tgt_mgmt_cmd *mcmd;
2085 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
2086
2087 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2088 "qla_target(%d): task abort (tag=%d)\n",
2089 vha->vp_idx, abts->exchange_addr_to_abort);
2090
2091 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2092 if (mcmd == NULL) {
2093 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
2094 "qla_target(%d): %s: Allocation of ABORT cmd failed",
2095 vha->vp_idx, __func__);
2096 return -ENOMEM;
2097 }
2098 memset(mcmd, 0, sizeof(*mcmd));
2099 mcmd->cmd_type = TYPE_TGT_TMCMD;
2100 mcmd->sess = sess;
2101 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
2102 mcmd->reset_count = ha->base_qpair->chip_reset;
2103 mcmd->tmr_func = QLA_TGT_ABTS;
2104 mcmd->qpair = h->qpair;
2105 mcmd->vha = vha;
2106
2107 /*
2108 * LUN is looked up by target-core internally based on the passed
2109 * abts->exchange_addr_to_abort tag.
2110 */
2111 mcmd->se_cmd.cpuid = h->cpuid;
2112
2113 if (ha->tgt.tgt_ops->find_cmd_by_tag) {
2114 struct qla_tgt_cmd *abort_cmd;
2115
2116 abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
2117 le32_to_cpu(abts->exchange_addr_to_abort));
2118 if (abort_cmd && abort_cmd->qpair) {
2119 mcmd->qpair = abort_cmd->qpair;
2120 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
2121 mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
2122 mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
2123 }
2124 }
2125
2126 INIT_WORK(&mcmd->work, qlt_do_tmr_work);
2127 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
2128
2129 return 0;
2130 }
2131
2132 /*
2133 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2134 */
2135 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2136 struct abts_recv_from_24xx *abts)
2137 {
2138 struct qla_hw_data *ha = vha->hw;
2139 struct fc_port *sess;
2140 uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
2141 be_id_t s_id;
2142 int rc;
2143 unsigned long flags;
2144
2145 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
2146 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
2147 "qla_target(%d): ABTS: Abort Sequence not "
2148 "supported\n", vha->vp_idx);
2149 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2150 false);
2151 return;
2152 }
2153
2154 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
2155 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
2156 "qla_target(%d): ABTS: Unknown Exchange "
2157 "Address received\n", vha->vp_idx);
2158 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2159 false);
2160 return;
2161 }
2162
2163 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2164 "qla_target(%d): task abort (s_id=%x:%x:%x, "
2165 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2166 abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
2167 le32_to_cpu(abts->fcp_hdr_le.parameter));
2168
2169 s_id = le_id_to_be(abts->fcp_hdr_le.s_id);
2170
2171 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2172 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2173 if (!sess) {
2174 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
2175 "qla_target(%d): task abort for non-existent session\n",
2176 vha->vp_idx);
2177 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2178
2179 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2180 false);
2181 return;
2182 }
2183 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2184
2185
2186 if (sess->deleted) {
2187 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2188 false);
2189 return;
2190 }
2191
2192 rc = __qlt_24xx_handle_abts(vha, abts, sess);
2193 if (rc != 0) {
2194 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2195 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
2196 vha->vp_idx, rc);
2197 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2198 false);
2199 return;
2200 }
2201 }
2202
2203 /*
2204 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2205 */
2206 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
2207 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
2208 {
2209 struct scsi_qla_host *ha = mcmd->vha;
2210 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
2211 struct ctio7_to_24xx *ctio;
2212 uint16_t temp;
2213
2214 ql_dbg(ql_dbg_tgt, ha, 0xe008,
2215 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
2216 ha, atio, resp_code);
2217
2218
2219 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
2220 if (ctio == NULL) {
2221 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
2222 "qla_target(%d): %s failed: unable to allocate "
2223 "request packet\n", ha->vp_idx, __func__);
2224 return;
2225 }
2226
2227 ctio->entry_type = CTIO_TYPE7;
2228 ctio->entry_count = 1;
2229 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2230 ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
2231 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2232 ctio->vp_index = ha->vp_idx;
2233 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2234 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2235 temp = (atio->u.isp24.attr << 9)|
2236 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2237 ctio->u.status1.flags = cpu_to_le16(temp);
2238 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2239 ctio->u.status1.ox_id = cpu_to_le16(temp);
2240 ctio->u.status1.scsi_status =
2241 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
2242 ctio->u.status1.response_len = cpu_to_le16(8);
2243 ctio->u.status1.sense_data[0] = resp_code;
2244
2245 /* Memory Barrier */
2246 wmb();
2247 if (qpair->reqq_start_iocbs)
2248 qpair->reqq_start_iocbs(qpair);
2249 else
2250 qla2x00_start_iocbs(ha, qpair->req);
2251 }
2252
2253 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
2254 {
2255 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2256 }
2257 EXPORT_SYMBOL(qlt_free_mcmd);
2258
2259 /*
2260 * ha->hardware_lock supposed to be held on entry. Might drop it, then
2261 * reacquire
2262 */
2263 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
2264 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
2265 {
2266 struct atio_from_isp *atio = &cmd->atio;
2267 struct ctio7_to_24xx *ctio;
2268 uint16_t temp;
2269 struct scsi_qla_host *vha = cmd->vha;
2270
2271 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2272 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
2273 "sense_key=%02x, asc=%02x, ascq=%02x",
2274 vha, atio, scsi_status, sense_key, asc, ascq);
2275
2276 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2277 if (!ctio) {
2278 ql_dbg(ql_dbg_async, vha, 0x3067,
2279 "qla2x00t(%ld): %s failed: unable to allocate request packet",
2280 vha->host_no, __func__);
2281 goto out;
2282 }
2283
2284 ctio->entry_type = CTIO_TYPE7;
2285 ctio->entry_count = 1;
2286 ctio->handle = QLA_TGT_SKIP_HANDLE;
2287 ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
2288 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2289 ctio->vp_index = vha->vp_idx;
2290 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2291 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2292 temp = (atio->u.isp24.attr << 9) |
2293 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2294 ctio->u.status1.flags = cpu_to_le16(temp);
2295 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2296 ctio->u.status1.ox_id = cpu_to_le16(temp);
2297 ctio->u.status1.scsi_status =
2298 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
2299 ctio->u.status1.response_len = cpu_to_le16(18);
2300 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
2301
2302 if (ctio->u.status1.residual != 0)
2303 ctio->u.status1.scsi_status |=
2304 cpu_to_le16(SS_RESIDUAL_UNDER);
2305
2306 /* Fixed format sense data. */
2307 ctio->u.status1.sense_data[0] = 0x70;
2308 ctio->u.status1.sense_data[2] = sense_key;
2309 /* Additional sense length */
2310 ctio->u.status1.sense_data[7] = 0xa;
2311 /* ASC and ASCQ */
2312 ctio->u.status1.sense_data[12] = asc;
2313 ctio->u.status1.sense_data[13] = ascq;
2314
2315 /* Memory Barrier */
2316 wmb();
2317
2318 if (qpair->reqq_start_iocbs)
2319 qpair->reqq_start_iocbs(qpair);
2320 else
2321 qla2x00_start_iocbs(vha, qpair->req);
2322
2323 out:
2324 return;
2325 }
2326
2327 /* callback from target fabric module code */
2328 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2329 {
2330 struct scsi_qla_host *vha = mcmd->sess->vha;
2331 struct qla_hw_data *ha = vha->hw;
2332 unsigned long flags;
2333 struct qla_qpair *qpair = mcmd->qpair;
2334 bool free_mcmd = true;
2335
2336 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
2337 "TM response mcmd (%p) status %#x state %#x",
2338 mcmd, mcmd->fc_tm_rsp, mcmd->flags);
2339
2340 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2341
2342 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
2343 /*
2344 * Either the port is not online or this request was from
2345 * previous life, just abort the processing.
2346 */
2347 ql_dbg(ql_dbg_async, vha, 0xe100,
2348 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
2349 vha->flags.online, qla2x00_reset_active(vha),
2350 mcmd->reset_count, qpair->chip_reset);
2351 ha->tgt.tgt_ops->free_mcmd(mcmd);
2352 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2353 return;
2354 }
2355
2356 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
2357 switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
2358 case ELS_LOGO:
2359 case ELS_PRLO:
2360 case ELS_TPRLO:
2361 ql_dbg(ql_dbg_disc, vha, 0x2106,
2362 "TM response logo %8phC status %#x state %#x",
2363 mcmd->sess->port_name, mcmd->fc_tm_rsp,
2364 mcmd->flags);
2365 qlt_schedule_sess_for_deletion(mcmd->sess);
2366 break;
2367 default:
2368 qlt_send_notify_ack(vha->hw->base_qpair,
2369 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2370 break;
2371 }
2372 } else {
2373 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
2374 qlt_build_abts_resp_iocb(mcmd);
2375 free_mcmd = false;
2376 } else
2377 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
2378 mcmd->fc_tm_rsp);
2379 }
2380 /*
2381 * Make the callback for ->free_mcmd() to queue_work() and invoke
2382 * target_put_sess_cmd() to drop cmd_kref to 1. The final
2383 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
2384 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
2385 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
2386 * qlt_xmit_tm_rsp() returns here.
2387 */
2388 if (free_mcmd)
2389 ha->tgt.tgt_ops->free_mcmd(mcmd);
2390
2391 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2392 }
2393 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2394
2395 /* No locks */
2396 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
2397 {
2398 struct qla_tgt_cmd *cmd = prm->cmd;
2399
2400 BUG_ON(cmd->sg_cnt == 0);
2401
2402 prm->sg = (struct scatterlist *)cmd->sg;
2403 prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
2404 cmd->sg_cnt, cmd->dma_data_direction);
2405 if (unlikely(prm->seg_cnt == 0))
2406 goto out_err;
2407
2408 prm->cmd->sg_mapped = 1;
2409
2410 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
2411 /*
2412 * If there are more sg entries than fit in the CTIO itself, we
2413 * need to allocate continuation entries
2414 */
2415 if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
2416 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
2417 QLA_TGT_DATASEGS_PER_CMD_24XX,
2418 QLA_TGT_DATASEGS_PER_CONT_24XX);
2419 } else {
2420 /* DIF */
2421 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2422 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2423 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
2424 prm->tot_dsds = prm->seg_cnt;
2425 } else
2426 prm->tot_dsds = prm->seg_cnt;
2427
2428 if (cmd->prot_sg_cnt) {
2429 prm->prot_sg = cmd->prot_sg;
2430 prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
2431 cmd->prot_sg, cmd->prot_sg_cnt,
2432 cmd->dma_data_direction);
2433 if (unlikely(prm->prot_seg_cnt == 0))
2434 goto out_err;
2435
2436 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2437 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2438 /* DIF bundling is not supported here */
2439 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
2440 cmd->blk_sz);
2441 prm->tot_dsds += prm->prot_seg_cnt;
2442 } else
2443 prm->tot_dsds += prm->prot_seg_cnt;
2444 }
2445 }
2446
2447 return 0;
2448
2449 out_err:
2450 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
2451 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
2452 0, prm->cmd->sg_cnt);
2453 return -1;
2454 }
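/*
 * Worked example for the continuation math above (hypothetical limits,
 * assuming 1 data segment in the CTIO and 5 per continuation IOCB):
 * seg_cnt = 11 gives
 *
 *	req_cnt += DIV_ROUND_UP(11 - 1, 5);	// += 2
 *
 * i.e. one CTIO plus two continuation entries on the request ring.
 */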
2455
2456 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2457 {
2458 struct qla_hw_data *ha;
2459 struct qla_qpair *qpair;
2460
2461 if (!cmd->sg_mapped)
2462 return;
2463
2464 qpair = cmd->qpair;
2465
2466 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
2467 cmd->dma_data_direction);
2468 cmd->sg_mapped = 0;
2469
2470 if (cmd->prot_sg_cnt)
2471 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
2472 cmd->dma_data_direction);
2473
2474 if (!cmd->ctx)
2475 return;
2476 ha = vha->hw;
2477 if (cmd->ctx_dsd_alloced)
2478 qla2x00_clean_dsd_pool(ha, cmd->ctx);
2479
2480 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2481 }
2482
2483 static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
2484 uint32_t req_cnt)
2485 {
2486 uint32_t cnt;
2487 struct req_que *req = qpair->req;
2488
2489 if (req->cnt < (req_cnt + 2)) {
2490 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
2491 rd_reg_dword_relaxed(req->req_q_out));
2492
2493 if (req->ring_index < cnt)
2494 req->cnt = cnt - req->ring_index;
2495 else
2496 req->cnt = req->length - (req->ring_index - cnt);
2497
2498 if (unlikely(req->cnt < (req_cnt + 2)))
2499 return -EAGAIN;
2500 }
2501
2502 req->cnt -= req_cnt;
2503
2504 return 0;
2505 }
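/*
 * Worked example of the free-slot arithmetic above (hypothetical
 * numbers): with req->length = 2048, ring_index = 100 and a firmware
 * out-pointer of cnt = 90, the producer is ahead of the consumer, so
 *
 *	req->cnt = 2048 - (100 - 90);	// 2038 free entries
 *
 * while ring_index = 90, cnt = 100 gives req->cnt = 100 - 90 = 10.
 * The "req_cnt + 2" test keeps two entries of slack so the ring is
 * never filled completely.
 */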
2506
2507 /*
2508 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2509 */
2510 static inline void *qlt_get_req_pkt(struct req_que *req)
2511 {
2512 /* Adjust ring index. */
2513 req->ring_index++;
2514 if (req->ring_index == req->length) {
2515 req->ring_index = 0;
2516 req->ring_ptr = req->ring;
2517 } else {
2518 req->ring_ptr++;
2519 }
2520 return (cont_entry_t *)req->ring_ptr;
2521 }
2522
2523 /* ha->hardware_lock supposed to be held on entry */
2524 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
2525 {
2526 uint32_t h;
2527 int index;
2528 uint8_t found = 0;
2529 struct req_que *req = qpair->req;
2530
2531 h = req->current_outstanding_cmd;
2532
2533 for (index = 1; index < req->num_outstanding_cmds; index++) {
2534 h++;
2535 if (h == req->num_outstanding_cmds)
2536 h = 1;
2537
2538 if (h == QLA_TGT_SKIP_HANDLE)
2539 continue;
2540
2541 if (!req->outstanding_cmds[h]) {
2542 found = 1;
2543 break;
2544 }
2545 }
2546
2547 if (found) {
2548 req->current_outstanding_cmd = h;
2549 } else {
2550 ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2551 "qla_target(%d): Ran out of empty cmd slots\n",
2552 qpair->vha->vp_idx);
2553 h = QLA_TGT_NULL_HANDLE;
2554 }
2555
2556 return h;
2557 }
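/*
 * Example of the wrap-around scan above: handles live in
 * [1, num_outstanding_cmds), slot 0 is never handed out, and
 * QLA_TGT_SKIP_HANDLE is stepped over. With num_outstanding_cmds = 1024
 * and current_outstanding_cmd = 1023, the first slot probed is h = 1.
 */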
2558
2559 /* ha->hardware_lock supposed to be held on entry */
2560 static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
2561 struct qla_tgt_prm *prm)
2562 {
2563 uint32_t h;
2564 struct ctio7_to_24xx *pkt;
2565 struct atio_from_isp *atio = &prm->cmd->atio;
2566 uint16_t temp;
2567
2568 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
2569 prm->pkt = pkt;
2570 memset(pkt, 0, sizeof(*pkt));
2571
2572 pkt->entry_type = CTIO_TYPE7;
2573 pkt->entry_count = (uint8_t)prm->req_cnt;
2574 pkt->vp_index = prm->cmd->vp_idx;
2575
2576 h = qlt_make_handle(qpair);
2577 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2578 /*
2579 * CTIO type 7 from the firmware doesn't provide a way to
2580 * know the initiator's LOOP ID, hence we can't find
2581 * the session and, so, the command.
2582 */
2583 return -EAGAIN;
2584 } else
2585 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
2586
2587 pkt->handle = make_handle(qpair->req->id, h);
2588 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
2589 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2590 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2591 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2592 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2593 temp = atio->u.isp24.attr << 9;
2594 pkt->u.status0.flags |= cpu_to_le16(temp);
2595 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2596 pkt->u.status0.ox_id = cpu_to_le16(temp);
2597 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
2598
2599 return 0;
2600 }
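/*
 * The composed handle encodes the request queue in the upper half and
 * the slot in the lower half, roughly (sketch; see make_handle() for
 * the authoritative definition):
 *
 *	pkt->handle = ((uint32_t)qpair->req->id << 16) | h;
 *	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
 *
 * The mark lets the completion path distinguish target-mode CTIO
 * handles from initiator-mode SRB handles.
 */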
2601
2602 /*
2603 * ha->hardware_lock supposed to be held on entry. We have already made sure
2604 * that there is a sufficient number of request entries to not drop it.
2605 */
2606 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2607 {
2608 int cnt;
2609 struct dsd64 *cur_dsd;
2610
2611 /* Build continuation packets */
2612 while (prm->seg_cnt > 0) {
2613 cont_a64_entry_t *cont_pkt64 =
2614 (cont_a64_entry_t *)qlt_get_req_pkt(
2615 prm->cmd->qpair->req);
2616
2617 /*
2618 * Make sure that none of cont_pkt64's 64-bit
2619 * specific fields are used for 32-bit
2620 * addressing. Cast to (cont_entry_t *) for
2621 * that.
2622 */
2623
2624 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2625
2626 cont_pkt64->entry_count = 1;
2627 cont_pkt64->sys_define = 0;
2628
2629 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2630 cur_dsd = cont_pkt64->dsd;
2631
2632 /* Load continuation entry data segments */
2633 for (cnt = 0;
2634 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2635 cnt++, prm->seg_cnt--) {
2636 append_dsd64(&cur_dsd, prm->sg);
2637 prm->sg = sg_next(prm->sg);
2638 }
2639 }
2640 }
2641
2642 /*
2643 * ha->hardware_lock supposed to be held on entry. We have already made sure
2644 * that there is a sufficient number of request entries to not drop it.
2645 */
2646 static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2647 {
2648 int cnt;
2649 struct dsd64 *cur_dsd;
2650 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2651
2652 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2653
2654 /* Setup packet address segment pointer */
2655 cur_dsd = &pkt24->u.status0.dsd;
2656
2657 /* Set total data segment count */
2658 if (prm->seg_cnt)
2659 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
2660
2661 if (prm->seg_cnt == 0) {
2662 /* No data transfer */
2663 cur_dsd->address = 0;
2664 cur_dsd->length = 0;
2665 return;
2666 }
2667
2668 /* If scatter gather */
2669
2670 /* Load command entry data segments */
2671 for (cnt = 0;
2672 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
2673 cnt++, prm->seg_cnt--) {
2674 append_dsd64(&cur_dsd, prm->sg);
2675 prm->sg = sg_next(prm->sg);
2676 }
2677
2678 qlt_load_cont_data_segments(prm);
2679 }
2680
2681 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2682 {
2683 return cmd->bufflen > 0;
2684 }
2685
2686 static void qlt_print_dif_err(struct qla_tgt_prm *prm)
2687 {
2688 struct qla_tgt_cmd *cmd;
2689 struct scsi_qla_host *vha;
2690
2691 /* asc 0x10=dif error */
2692 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
2693 cmd = prm->cmd;
2694 vha = cmd->vha;
2695 /* ASCQ */
2696 switch (prm->sense_buffer[13]) {
2697 case 1:
2698 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2699 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2700 "se_cmd=%p tag[%x]",
2701 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2702 cmd->atio.u.isp24.exchange_addr);
2703 break;
2704 case 2:
2705 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2706 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2707 "se_cmd=%p tag[%x]",
2708 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2709 cmd->atio.u.isp24.exchange_addr);
2710 break;
2711 case 3:
2712 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2713 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2714 "se_cmd=%p tag[%x]",
2715 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2716 cmd->atio.u.isp24.exchange_addr);
2717 break;
2718 default:
2719 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2720 "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
2721 "se_cmd=%p tag[%x]",
2722 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2723 cmd->atio.u.isp24.exchange_addr);
2724 break;
2725 }
2726 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2727 }
2728 }
2729
2730 /*
2731 * Called without ha->hardware_lock held
2732 */
2733 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2734 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2735 uint32_t *full_req_cnt)
2736 {
2737 struct se_cmd *se_cmd = &cmd->se_cmd;
2738 struct qla_qpair *qpair = cmd->qpair;
2739
2740 prm->cmd = cmd;
2741 prm->tgt = cmd->tgt;
2742 prm->pkt = NULL;
2743 prm->rq_result = scsi_status;
2744 prm->sense_buffer = &cmd->sense_buffer[0];
2745 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2746 prm->sg = NULL;
2747 prm->seg_cnt = -1;
2748 prm->req_cnt = 1;
2749 prm->residual = 0;
2750 prm->add_status_pkt = 0;
2751 prm->prot_sg = NULL;
2752 prm->prot_seg_cnt = 0;
2753 prm->tot_dsds = 0;
2754
2755 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2756 if (qlt_pci_map_calc_cnt(prm) != 0)
2757 return -EAGAIN;
2758 }
2759
2760 *full_req_cnt = prm->req_cnt;
2761
2762 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2763 prm->residual = se_cmd->residual_count;
2764 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
2765 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2766 prm->residual, se_cmd->tag,
2767 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2768 cmd->bufflen, prm->rq_result);
2769 prm->rq_result |= SS_RESIDUAL_UNDER;
2770 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2771 prm->residual = se_cmd->residual_count;
2772 ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
2773 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2774 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2775 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2776 prm->rq_result |= SS_RESIDUAL_OVER;
2777 }
2778
2779 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2780 /*
2781 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2782 * ignored in *xmit_response() below
2783 */
2784 if (qlt_has_data(cmd)) {
2785 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2786 (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2787 (prm->rq_result != 0))) {
2788 prm->add_status_pkt = 1;
2789 (*full_req_cnt)++;
2790 }
2791 }
2792 }
2793
2794 return 0;
2795 }
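/*
 * Residual example: for a READ where the initiator asked for 4096 bytes
 * but the backend returned only 512, target-core sets SCF_UNDERFLOW_BIT
 * and residual_count = 3584, which the code above folds into the
 * response:
 *
 *	prm->residual = 3584;
 *	prm->rq_result |= SS_RESIDUAL_UNDER;
 */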
2796
2797 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2798 int sending_sense)
2799 {
2800 if (cmd->qpair->enable_class_2)
2801 return 0;
2802
2803 if (sending_sense)
2804 return cmd->conf_compl_supported;
2805 else
2806 return cmd->qpair->enable_explicit_conf &&
2807 cmd->conf_compl_supported;
2808 }
2809
2810 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
2811 struct qla_tgt_prm *prm)
2812 {
2813 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
2814 (uint32_t)sizeof(ctio->u.status1.sense_data));
2815 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2816 if (qlt_need_explicit_conf(prm->cmd, 0)) {
2817 ctio->u.status0.flags |= cpu_to_le16(
2818 CTIO7_FLAGS_EXPLICIT_CONFORM |
2819 CTIO7_FLAGS_CONFORM_REQ);
2820 }
2821 ctio->u.status0.residual = cpu_to_le32(prm->residual);
2822 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
2823 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
2824 int i;
2825
2826 if (qlt_need_explicit_conf(prm->cmd, 1)) {
2827 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
2828 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
2829 "Skipping EXPLICIT_CONFORM and "
2830 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
2831 "non GOOD status\n");
2832 goto skip_explict_conf;
2833 }
2834 ctio->u.status1.flags |= cpu_to_le16(
2835 CTIO7_FLAGS_EXPLICIT_CONFORM |
2836 CTIO7_FLAGS_CONFORM_REQ);
2837 }
2838 skip_explict_conf:
2839 ctio->u.status1.flags &=
2840 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2841 ctio->u.status1.flags |=
2842 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2843 ctio->u.status1.scsi_status |=
2844 cpu_to_le16(SS_SENSE_LEN_VALID);
2845 ctio->u.status1.sense_length =
2846 cpu_to_le16(prm->sense_buffer_len);
2847 for (i = 0; i < prm->sense_buffer_len/4; i++) {
2848 uint32_t v;
2849
2850 v = get_unaligned_be32(
2851 &((uint32_t *)prm->sense_buffer)[i]);
2852 put_unaligned_le32(v,
2853 &((uint32_t *)ctio->u.status1.sense_data)[i]);
2854 }
2855 qlt_print_dif_err(prm);
2856
2857 } else {
2858 ctio->u.status1.flags &=
2859 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2860 ctio->u.status1.flags |=
2861 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2862 ctio->u.status1.sense_length = 0;
2863 memset(ctio->u.status1.sense_data, 0,
2864 sizeof(ctio->u.status1.sense_data));
2865 }
2866
2867 /* Sense with len > 24, is it possible ??? */
2868 }
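/*
 * The word-wise sense copy above exists because the firmware expects
 * the sense buffer as little-endian 32-bit words. E.g. the fixed-format
 * header bytes 70 00 0b 00 are read as the big-endian word 0x70000b00
 * and stored as 00 0b 00 70 in host memory; the firmware swaps each
 * word back before it goes on the wire.
 */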
2869
2870 static inline int
2871 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2872 {
2873 switch (se_cmd->prot_op) {
2874 case TARGET_PROT_DOUT_INSERT:
2875 case TARGET_PROT_DIN_STRIP:
2876 if (ql2xenablehba_err_chk >= 1)
2877 return 1;
2878 break;
2879 case TARGET_PROT_DOUT_PASS:
2880 case TARGET_PROT_DIN_PASS:
2881 if (ql2xenablehba_err_chk >= 2)
2882 return 1;
2883 break;
2884 case TARGET_PROT_DIN_INSERT:
2885 case TARGET_PROT_DOUT_STRIP:
2886 return 1;
2887 default:
2888 break;
2889 }
2890 return 0;
2891 }
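/*
 * Policy summary for the switch above: DIN_INSERT/DOUT_STRIP always
 * enable HBA checking (the HBA itself produces or consumes the tags),
 * DOUT_INSERT/DIN_STRIP require ql2xenablehba_err_chk >= 1, and the
 * PASS modes require >= 2.
 */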
2892
2893 static inline int
2894 qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2895 {
2896 switch (se_cmd->prot_op) {
2897 case TARGET_PROT_DIN_INSERT:
2898 case TARGET_PROT_DOUT_INSERT:
2899 case TARGET_PROT_DIN_STRIP:
2900 case TARGET_PROT_DOUT_STRIP:
2901 case TARGET_PROT_DIN_PASS:
2902 case TARGET_PROT_DOUT_PASS:
2903 return 1;
2904 default:
2905 return 0;
2906 }
2907 return 0;
2908 }
2909
2910 /*
2911 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
2912 */
2913 static void
2914 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
2915 uint16_t *pfw_prot_opts)
2916 {
2917 struct se_cmd *se_cmd = &cmd->se_cmd;
2918 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2919 scsi_qla_host_t *vha = cmd->tgt->vha;
2920 struct qla_hw_data *ha = vha->hw;
2921 uint32_t t32 = 0;
2922
2923 /*
2924 * Wait until Mode Sense/Select cmd, mode page 0Ah, subpage 2
2925 * have been implemented by TCM, before the AppTag is available.
2926 * Look for modesense_handlers[]
2927 */
2928 ctx->app_tag = 0;
2929 ctx->app_tag_mask[0] = 0x0;
2930 ctx->app_tag_mask[1] = 0x0;
2931
2932 if (IS_PI_UNINIT_CAPABLE(ha)) {
2933 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2934 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2935 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
2936 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2937 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2938 }
2939
2940 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
2941
2942 switch (se_cmd->prot_type) {
2943 case TARGET_DIF_TYPE0_PROT:
2944 /*
2945 * No check for ql2xenablehba_err_chk, as it
2946 * would be an I/O error if hba tag generation
2947 * is not done.
2948 */
2949 ctx->ref_tag = cpu_to_le32(lba);
2950 /* enable ALL bytes of the ref tag */
2951 ctx->ref_tag_mask[0] = 0xff;
2952 ctx->ref_tag_mask[1] = 0xff;
2953 ctx->ref_tag_mask[2] = 0xff;
2954 ctx->ref_tag_mask[3] = 0xff;
2955 break;
2956 case TARGET_DIF_TYPE1_PROT:
2957 /*
2958 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
2959 * REF tag, and 16 bit app tag.
2960 */
2961 ctx->ref_tag = cpu_to_le32(lba);
2962 if (!qla_tgt_ref_mask_check(se_cmd) ||
2963 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2964 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2965 break;
2966 }
2967 /* enable ALL bytes of the ref tag */
2968 ctx->ref_tag_mask[0] = 0xff;
2969 ctx->ref_tag_mask[1] = 0xff;
2970 ctx->ref_tag_mask[2] = 0xff;
2971 ctx->ref_tag_mask[3] = 0xff;
2972 break;
2973 case TARGET_DIF_TYPE2_PROT:
2974 /*
2975 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
2976 * tag has to match LBA in CDB + N
2977 */
2978 ctx->ref_tag = cpu_to_le32(lba);
2979 if (!qla_tgt_ref_mask_check(se_cmd) ||
2980 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2981 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2982 break;
2983 }
2984 /* enable ALL bytes of the ref tag */
2985 ctx->ref_tag_mask[0] = 0xff;
2986 ctx->ref_tag_mask[1] = 0xff;
2987 ctx->ref_tag_mask[2] = 0xff;
2988 ctx->ref_tag_mask[3] = 0xff;
2989 break;
2990 case TARGET_DIF_TYPE3_PROT:
2991 /* For TYPE 3 protection: 16 bit GUARD only */
2992 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2993 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2994 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
2995 break;
2996 }
2997 }
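/*
 * Example: for a Type 1 or Type 2 command at t_task_lba = 0x100001000,
 * the seed reference tag is the low 32 bits of the LBA,
 *
 *	ctx->ref_tag = cpu_to_le32(0x00001000);
 *
 * with all four ref_tag_mask bytes set to 0xff so every tag byte is
 * checked, while Type 3 disables ref-tag checking altogether via
 * PO_DIS_REF_TAG_VALD.
 */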
2998
2999 static inline int
3000 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
3001 {
3002 struct dsd64 *cur_dsd;
3003 uint32_t transfer_length = 0;
3004 uint32_t data_bytes;
3005 uint32_t dif_bytes;
3006 uint8_t bundling = 1;
3007 struct crc_context *crc_ctx_pkt = NULL;
3008 struct qla_hw_data *ha;
3009 struct ctio_crc2_to_fw *pkt;
3010 dma_addr_t crc_ctx_dma;
3011 uint16_t fw_prot_opts = 0;
3012 struct qla_tgt_cmd *cmd = prm->cmd;
3013 struct se_cmd *se_cmd = &cmd->se_cmd;
3014 uint32_t h;
3015 struct atio_from_isp *atio = &prm->cmd->atio;
3016 struct qla_tc_param tc;
3017 uint16_t t16;
3018 scsi_qla_host_t *vha = cmd->vha;
3019
3020 ha = vha->hw;
3021
3022 pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
3023 prm->pkt = pkt;
3024 memset(pkt, 0, sizeof(*pkt));
3025
3026 ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
3027 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
3028 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
3029 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
3030
3031 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
3032 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
3033 bundling = 0;
3034
3035 /* Compute DIF len and adjust data len to include protection */
3036 data_bytes = cmd->bufflen;
3037 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
3038
3039 switch (se_cmd->prot_op) {
3040 case TARGET_PROT_DIN_INSERT:
3041 case TARGET_PROT_DOUT_STRIP:
3042 transfer_length = data_bytes;
3043 if (cmd->prot_sg_cnt)
3044 data_bytes += dif_bytes;
3045 break;
3046 case TARGET_PROT_DIN_STRIP:
3047 case TARGET_PROT_DOUT_INSERT:
3048 case TARGET_PROT_DIN_PASS:
3049 case TARGET_PROT_DOUT_PASS:
3050 transfer_length = data_bytes + dif_bytes;
3051 break;
3052 default:
3053 BUG();
3054 break;
3055 }
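/*
 * Example: an eight-block transfer with blk_sz = 512 has data_bytes =
 * 4096 and dif_bytes = (4096 / 512) * 8 = 64, one 8-byte DIF tuple per
 * block; for the cases where protection travels on the wire (DIN_STRIP,
 * DOUT_INSERT and the PASS modes) the FC transfer length becomes
 * 4096 + 64 = 4160.
 */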
3056
3057 if (!qlt_hba_err_chk_enabled(se_cmd))
3058 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
3059 /* HBA error checking enabled */
3060 else if (IS_PI_UNINIT_CAPABLE(ha)) {
3061 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
3062 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
3063 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
3064 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
3065 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
3066 }
3067
3068 switch (se_cmd->prot_op) {
3069 case TARGET_PROT_DIN_INSERT:
3070 case TARGET_PROT_DOUT_INSERT:
3071 fw_prot_opts |= PO_MODE_DIF_INSERT;
3072 break;
3073 case TARGET_PROT_DIN_STRIP:
3074 case TARGET_PROT_DOUT_STRIP:
3075 fw_prot_opts |= PO_MODE_DIF_REMOVE;
3076 break;
3077 case TARGET_PROT_DIN_PASS:
3078 case TARGET_PROT_DOUT_PASS:
3079 fw_prot_opts |= PO_MODE_DIF_PASS;
3080 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
3081 break;
3082 default:/* Normal Request */
3083 fw_prot_opts |= PO_MODE_DIF_PASS;
3084 break;
3085 }
3086
3087 /* ---- PKT ---- */
3088 /* Update entry type to indicate Command Type CRC_2 IOCB */
3089 pkt->entry_type = CTIO_CRC2;
3090 pkt->entry_count = 1;
3091 pkt->vp_index = cmd->vp_idx;
3092
3093 h = qlt_make_handle(qpair);
3094 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
3095 /*
3096 * CTIO type 7 from the firmware doesn't provide a way to
3097 * know the initiator's LOOP ID, hence we can't find
3098 * the session and, so, the command.
3099 */
3100 return -EAGAIN;
3101 } else
3102 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
3103
3104 pkt->handle = make_handle(qpair->req->id, h);
3105 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
3106 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
3107 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3108 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3109 pkt->exchange_addr = atio->u.isp24.exchange_addr;
3110
3111 /* silence compile warning */
3112 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3113 pkt->ox_id = cpu_to_le16(t16);
3114
3115 t16 = (atio->u.isp24.attr << 9);
3116 pkt->flags |= cpu_to_le16(t16);
3117 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
3118
3119 /* Set transfer direction */
3120 if (cmd->dma_data_direction == DMA_TO_DEVICE)
3121 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
3122 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
3123 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
3124
3125 pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
3126 /* Fibre channel byte count */
3127 pkt->transfer_length = cpu_to_le32(transfer_length);
3128
3129 /* ----- CRC context -------- */
3130
3131 /* Allocate CRC context from global pool */
3132 crc_ctx_pkt = cmd->ctx =
3133 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
3134
3135 if (!crc_ctx_pkt)
3136 goto crc_queuing_error;
3137
3138 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
3139 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
3140
3141 /* Set handle */
3142 crc_ctx_pkt->handle = pkt->handle;
3143
3144 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
3145
3146 put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
3147 pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
3148
3149 if (!bundling) {
3150 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
3151 } else {
3152 /*
3153 * Configure bundling if we need to fetch the interleaved
3154 * protection data with separate PCI accesses
3155 */
3156 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
3157 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
3158 crc_ctx_pkt->u.bundling.dseg_count =
3159 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
3160 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
3161 }
3162
3163 /* Finish the common fields of CRC pkt */
3164 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
3165 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
3166 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
3167 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
3168
3169 memset((uint8_t *)&tc, 0 , sizeof(tc));
3170 tc.vha = vha;
3171 tc.blk_sz = cmd->blk_sz;
3172 tc.bufflen = cmd->bufflen;
3173 tc.sg = cmd->sg;
3174 tc.prot_sg = cmd->prot_sg;
3175 tc.ctx = crc_ctx_pkt;
3176 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
3177
3178 /* Walks data segments */
3179 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
3180
3181 if (!bundling && prm->prot_seg_cnt) {
3182 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
3183 prm->tot_dsds, &tc))
3184 goto crc_queuing_error;
3185 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
3186 (prm->tot_dsds - prm->prot_seg_cnt), &tc))
3187 goto crc_queuing_error;
3188
3189 if (bundling && prm->prot_seg_cnt) {
3190 /* Walks dif segments */
3191 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
3192
3193 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
3194 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
3195 prm->prot_seg_cnt, cmd))
3196 goto crc_queuing_error;
3197 }
3198 return QLA_SUCCESS;
3199
3200 crc_queuing_error:
3201 /* Cleanup will be performed by the caller */
3202 qpair->req->outstanding_cmds[h] = NULL;
3203
3204 return QLA_FUNCTION_FAILED;
3205 }
3206
3207 /*
3208 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
3209 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
3210 */
3211 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3212 uint8_t scsi_status)
3213 {
3214 struct scsi_qla_host *vha = cmd->vha;
3215 struct qla_qpair *qpair = cmd->qpair;
3216 struct ctio7_to_24xx *pkt;
3217 struct qla_tgt_prm prm;
3218 uint32_t full_req_cnt = 0;
3219 unsigned long flags = 0;
3220 int res;
3221
3222 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3223 (cmd->sess && cmd->sess->deleted)) {
3224 cmd->state = QLA_TGT_STATE_PROCESSED;
3225 res = 0;
3226 goto free;
3227 }
3228
3229 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
3230 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
3231 (xmit_type & QLA_TGT_XMIT_STATUS) ?
3232 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
3233 &cmd->se_cmd, qpair->id);
3234
3235 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
3236 &full_req_cnt);
3237 if (unlikely(res != 0))
3238 goto free;
3239
3240 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3241
3242 if (xmit_type == QLA_TGT_XMIT_STATUS)
3243 qpair->tgt_counters.core_qla_snd_status++;
3244 else
3245 qpair->tgt_counters.core_qla_que_buf++;
3246
3247 if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
3248 /*
3249 * Either the port is not online or this request was from
3250 * previous life, just abort the processing.
3251 */
3252 cmd->state = QLA_TGT_STATE_PROCESSED;
3253 ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
3254 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
3255 vha->flags.online, qla2x00_reset_active(vha),
3256 cmd->reset_count, qpair->chip_reset);
3257 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3258 res = 0;
3259 goto free;
3260 }
3261
3262 /* Does F/W have an IOCBs for this request */
3263 res = qlt_check_reserve_free_req(qpair, full_req_cnt);
3264 if (unlikely(res))
3265 goto out_unmap_unlock;
3266
3267 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
3268 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3269 else
3270 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3271 if (unlikely(res != 0)) {
3272 qpair->req->cnt += full_req_cnt;
3273 goto out_unmap_unlock;
3274 }
3275
3276 pkt = (struct ctio7_to_24xx *)prm.pkt;
3277
3278 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
3279 pkt->u.status0.flags |=
3280 cpu_to_le16(CTIO7_FLAGS_DATA_IN |
3281 CTIO7_FLAGS_STATUS_MODE_0);
3282
3283 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3284 qlt_load_data_segments(&prm);
3285
3286 if (prm.add_status_pkt == 0) {
3287 if (xmit_type & QLA_TGT_XMIT_STATUS) {
3288 pkt->u.status0.scsi_status =
3289 cpu_to_le16(prm.rq_result);
3290 pkt->u.status0.residual =
3291 cpu_to_le32(prm.residual);
3292 pkt->u.status0.flags |= cpu_to_le16(
3293 CTIO7_FLAGS_SEND_STATUS);
3294 if (qlt_need_explicit_conf(cmd, 0)) {
3295 pkt->u.status0.flags |=
3296 cpu_to_le16(
3297 CTIO7_FLAGS_EXPLICIT_CONFORM |
3298 CTIO7_FLAGS_CONFORM_REQ);
3299 }
3300 }
3301
3302 } else {
3303 /*
3304 * We have already made sure that there is a sufficient
3305 * number of request entries to not drop the HW lock in
3306 * req_pkt().
3307 */
3308 struct ctio7_to_24xx *ctio =
3309 (struct ctio7_to_24xx *)qlt_get_req_pkt(
3310 qpair->req);
3311
3312 ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
3313 "Building additional status packet 0x%p.\n",
3314 ctio);
3315
3316 /*
3317 * T10Dif: ctio_crc2_to_fw overlay ontop of
3318 * ctio7_to_24xx
3319 */
3320 memcpy(ctio, pkt, sizeof(*ctio));
3321 /* reset back to CTIO7 */
3322 ctio->entry_count = 1;
3323 ctio->entry_type = CTIO_TYPE7;
3324 ctio->dseg_count = 0;
3325 ctio->u.status1.flags &= ~cpu_to_le16(
3326 CTIO7_FLAGS_DATA_IN);
3327
3328 /* Real finish is ctio_m1's finish */
3329 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
3330 pkt->u.status0.flags |= cpu_to_le16(
3331 CTIO7_FLAGS_DONT_RET_CTIO);
3332
3333 /* qlt_24xx_init_ctio_to_isp will correct
3334 * all necessary fields that are part of CTIO7.
3335 * There should be no residual CTIO-CRC2 data.
3336 */
3337 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
3338 &prm);
3339 }
3340 } else
3341 qlt_24xx_init_ctio_to_isp(pkt, &prm);
3342
3343
3344 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3345 cmd->cmd_sent_to_fw = 1;
3346 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3347
3348 /* Memory Barrier */
3349 wmb();
3350 if (qpair->reqq_start_iocbs)
3351 qpair->reqq_start_iocbs(qpair);
3352 else
3353 qla2x00_start_iocbs(vha, qpair->req);
3354 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3355
3356 return 0;
3357
3358 out_unmap_unlock:
3359 qlt_unmap_sg(vha, cmd);
3360 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3361
3362 free:
3363 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3364 return res;
3365 }
3366 EXPORT_SYMBOL(qlt_xmit_response);
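/*
 * Typical fabric-side usage (sketch mirroring tcm_qla2xxx): send data
 * and status together for a completed READ, or status alone for a
 * dataless command:
 *
 *	qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA | QLA_TGT_XMIT_STATUS,
 *	    se_cmd->scsi_status);
 *	qlt_xmit_response(cmd, QLA_TGT_XMIT_STATUS, se_cmd->scsi_status);
 */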
3367
3368 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
3369 {
3370 struct ctio7_to_24xx *pkt;
3371 struct scsi_qla_host *vha = cmd->vha;
3372 struct qla_tgt *tgt = cmd->tgt;
3373 struct qla_tgt_prm prm;
3374 unsigned long flags = 0;
3375 int res = 0;
3376 struct qla_qpair *qpair = cmd->qpair;
3377
3378 memset(&prm, 0, sizeof(prm));
3379 prm.cmd = cmd;
3380 prm.tgt = tgt;
3381 prm.sg = NULL;
3382 prm.req_cnt = 1;
3383
3384 /* Calculate number of entries and segments required */
3385 if (qlt_pci_map_calc_cnt(&prm) != 0)
3386 return -EAGAIN;
3387
3388 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3389 (cmd->sess && cmd->sess->deleted)) {
3390 /*
3391 * Either the port is not online or this request was from
3392 * previous life, just abort the processing.
3393 */
3394 cmd->aborted = 1;
3395 cmd->write_data_transferred = 0;
3396 cmd->state = QLA_TGT_STATE_DATA_IN;
3397 vha->hw->tgt.tgt_ops->handle_data(cmd);
3398 ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
3399 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
3400 vha->flags.online, qla2x00_reset_active(vha),
3401 cmd->reset_count, qpair->chip_reset);
3402 return 0;
3403 }
3404
3405 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3406 /* Does F/W have an IOCBs for this request */
3407 res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
3408 if (res != 0)
3409 goto out_unlock_free_unmap;
3410 if (cmd->se_cmd.prot_op)
3411 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3412 else
3413 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3414
3415 if (unlikely(res != 0)) {
3416 qpair->req->cnt += prm.req_cnt;
3417 goto out_unlock_free_unmap;
3418 }
3419
3420 pkt = (struct ctio7_to_24xx *)prm.pkt;
3421 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
3422 CTIO7_FLAGS_STATUS_MODE_0);
3423
3424 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3425 qlt_load_data_segments(&prm);
3426
3427 cmd->state = QLA_TGT_STATE_NEED_DATA;
3428 cmd->cmd_sent_to_fw = 1;
3429 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3430
3431 /* Memory Barrier */
3432 wmb();
3433 if (qpair->reqq_start_iocbs)
3434 qpair->reqq_start_iocbs(qpair);
3435 else
3436 qla2x00_start_iocbs(vha, qpair->req);
3437 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3438
3439 return res;
3440
3441 out_unlock_free_unmap:
3442 qlt_unmap_sg(vha, cmd);
3443 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3444
3445 return res;
3446 }
3447 EXPORT_SYMBOL(qlt_rdy_to_xfer);
3448
3449
3450 /*
3451 * it is assumed either hardware_lock or qpair lock is held.
3452 */
3453 static void
3454 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
3455 struct ctio_crc_from_fw *sts)
3456 {
3457 uint8_t *ap = &sts->actual_dif[0];
3458 uint8_t *ep = &sts->expected_dif[0];
3459 uint64_t lba = cmd->se_cmd.t_task_lba;
3460 uint8_t scsi_status, sense_key, asc, ascq;
3461 unsigned long flags;
3462 struct scsi_qla_host *vha = cmd->vha;
3463
3464 cmd->trc_flags |= TRC_DIF_ERR;
3465
3466 cmd->a_guard = get_unaligned_be16(ap + 0);
3467 cmd->a_app_tag = get_unaligned_be16(ap + 2);
3468 cmd->a_ref_tag = get_unaligned_be32(ap + 4);
3469
3470 cmd->e_guard = get_unaligned_be16(ep + 0);
3471 cmd->e_app_tag = get_unaligned_be16(ep + 2);
3472 cmd->e_ref_tag = get_unaligned_be32(ep + 4);
3473
3474 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3475 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3476
3477 scsi_status = sense_key = asc = ascq = 0;
3478
3479 /* check appl tag */
3480 if (cmd->e_app_tag != cmd->a_app_tag) {
3481 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
3482 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3483 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3484 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3485 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3486 cmd->atio.u.isp24.fcp_hdr.ox_id);
3487
3488 cmd->dif_err_code = DIF_ERR_APP;
3489 scsi_status = SAM_STAT_CHECK_CONDITION;
3490 sense_key = ABORTED_COMMAND;
3491 asc = 0x10;
3492 ascq = 0x2;
3493 }
3494
3495 /* check ref tag */
3496 if (cmd->e_ref_tag != cmd->a_ref_tag) {
3497 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
3498 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
3499 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3500 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3501 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3502 cmd->atio.u.isp24.fcp_hdr.ox_id);
3503
3504 cmd->dif_err_code = DIF_ERR_REF;
3505 scsi_status = SAM_STAT_CHECK_CONDITION;
3506 sense_key = ABORTED_COMMAND;
3507 asc = 0x10;
3508 ascq = 0x3;
3509 goto out;
3510 }
3511
3512 /* check guard */
3513 if (cmd->e_guard != cmd->a_guard) {
3514 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
3515 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3516 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3517 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3518 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3519 cmd->atio.u.isp24.fcp_hdr.ox_id);
3520
3521 cmd->dif_err_code = DIF_ERR_GRD;
3522 scsi_status = SAM_STAT_CHECK_CONDITION;
3523 sense_key = ABORTED_COMMAND;
3524 asc = 0x10;
3525 ascq = 0x1;
3526 }
3527 out:
3528 switch (cmd->state) {
3529 case QLA_TGT_STATE_NEED_DATA:
3530 /* handle_data will load DIF error code */
3531 cmd->state = QLA_TGT_STATE_DATA_IN;
3532 vha->hw->tgt.tgt_ops->handle_data(cmd);
3533 break;
3534 default:
3535 spin_lock_irqsave(&cmd->cmd_lock, flags);
3536 if (cmd->aborted) {
3537 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3538 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3539 break;
3540 }
3541 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3542
3543 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
3544 ascq);
3545 		/* Assume the SCSI status gets out on the wire;
3546 		 * do not wait for completion.
3547 */
3548 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3549 break;
3550 }
3551 }
3552
3553 /* If hardware_lock held on entry, might drop it, then reacquire */
3554 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3555 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3556 struct imm_ntfy_from_isp *ntfy)
3557 {
3558 struct nack_to_isp *nack;
3559 struct qla_hw_data *ha = vha->hw;
3560 request_t *pkt;
3561 int ret = 0;
3562
3563 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3564 "Sending TERM ELS CTIO (ha=%p)\n", ha);
3565
3566 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3567 if (pkt == NULL) {
3568 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3569 "qla_target(%d): %s failed: unable to allocate "
3570 "request packet\n", vha->vp_idx, __func__);
3571 return -ENOMEM;
3572 }
3573
3574 pkt->entry_type = NOTIFY_ACK_TYPE;
3575 pkt->entry_count = 1;
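	/*
	 * QLA_TGT_SKIP_HANDLE tells the completion path (see
	 * qlt_ctio_to_cmd()) that there is no driver command to look up
	 * for this IOCB.
	 */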
3576 pkt->handle = QLA_TGT_SKIP_HANDLE;
3577
3578 nack = (struct nack_to_isp *)pkt;
3579 nack->ox_id = ntfy->ox_id;
3580
3581 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3582 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3583 nack->u.isp24.flags = ntfy->u.isp24.flags &
3584 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3585 }
3586
3587 /* terminate */
3588 nack->u.isp24.flags |=
3589 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3590
3591 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3592 nack->u.isp24.status = ntfy->u.isp24.status;
3593 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3594 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3595 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3596 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3597 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3598 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3599
3600 qla2x00_start_iocbs(vha, vha->req);
3601 return ret;
3602 }
3603
3604 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3605 struct imm_ntfy_from_isp *imm, int ha_locked)
3606 {
3607 int rc;
3608
3609 WARN_ON_ONCE(!ha_locked);
3610 rc = __qlt_send_term_imm_notif(vha, imm);
3611 pr_debug("rc = %d\n", rc);
3612 }
3613
3614 /*
3615  * If hardware_lock held on entry, might drop it, then reacquire
3616 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
3617 */
3618 static int __qlt_send_term_exchange(struct qla_qpair *qpair,
3619 struct qla_tgt_cmd *cmd,
3620 struct atio_from_isp *atio)
3621 {
3622 struct scsi_qla_host *vha = qpair->vha;
3623 struct ctio7_to_24xx *ctio24;
3624 struct qla_hw_data *ha = vha->hw;
3625 request_t *pkt;
3626 int ret = 0;
3627 uint16_t temp;
3628
3629 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3630
3631 if (cmd)
3632 vha = cmd->vha;
3633
3634 pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
3635 if (pkt == NULL) {
3636 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3637 "qla_target(%d): %s failed: unable to allocate "
3638 "request packet\n", vha->vp_idx, __func__);
3639 return -ENOMEM;
3640 }
3641
3642 if (cmd != NULL) {
3643 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3644 ql_dbg(ql_dbg_tgt, vha, 0xe051,
3645 "qla_target(%d): Terminating cmd %p with "
3646 "incorrect state %d\n", vha->vp_idx, cmd,
3647 cmd->state);
3648 } else
3649 ret = 1;
3650 }
3651
3652 qpair->tgt_counters.num_term_xchg_sent++;
3653 pkt->entry_count = 1;
3654 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3655
3656 ctio24 = (struct ctio7_to_24xx *)pkt;
3657 ctio24->entry_type = CTIO_TYPE7;
3658 ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
3659 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3660 ctio24->vp_index = vha->vp_idx;
3661 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3662 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3663 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
3664 CTIO7_FLAGS_TERMINATE;
3665 ctio24->u.status1.flags = cpu_to_le16(temp);
3666 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3667 ctio24->u.status1.ox_id = cpu_to_le16(temp);
3668
3669 /* Memory Barrier */
3670 wmb();
3671 if (qpair->reqq_start_iocbs)
3672 qpair->reqq_start_iocbs(qpair);
3673 else
3674 qla2x00_start_iocbs(vha, qpair->req);
3675 return ret;
3676 }
3677
3678 static void qlt_send_term_exchange(struct qla_qpair *qpair,
3679 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3680 int ul_abort)
3681 {
3682 struct scsi_qla_host *vha;
3683 unsigned long flags = 0;
3684 int rc;
3685
3686 	/* The cmd may belong to a different vha than the qpair (NPIV) */
3687 if (cmd)
3688 vha = cmd->vha;
3689 else
3690 vha = qpair->vha;
3691
3692 if (ha_locked) {
3693 rc = __qlt_send_term_exchange(qpair, cmd, atio);
3694 if (rc == -ENOMEM)
3695 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3696 goto done;
3697 }
3698 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3699 rc = __qlt_send_term_exchange(qpair, cmd, atio);
3700 if (rc == -ENOMEM)
3701 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3702
3703 done:
3704 if (cmd && !ul_abort && !cmd->aborted) {
3705 if (cmd->sg_mapped)
3706 qlt_unmap_sg(vha, cmd);
3707 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3708 }
3709
3710 if (!ha_locked)
3711 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3712
3713 return;
3714 }
3715
3716 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3717 {
3718 struct list_head free_list;
3719 struct qla_tgt_cmd *cmd, *tcmd;
3720
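	/*
	 * Arm the exchange-leak threshold at LEAK_EXCHG_THRESH_HOLD_PERCENT
	 * percent of the FW exchange control block count; once more QFull
	 * commands than this have been dropped,
	 * qlt_chk_exch_leak_thresh_hold() requests a chip reset to reclaim
	 * the leaked exchanges.
	 */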
3721 vha->hw->tgt.leak_exchg_thresh_hold =
3722 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3723
3724 cmd = tcmd = NULL;
3725 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3726 INIT_LIST_HEAD(&free_list);
3727 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3728
3729 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3730 list_del(&cmd->cmd_list);
3731 /* This cmd was never sent to TCM. There is no need
3732 * to schedule free or call free_cmd
3733 */
3734 qlt_free_cmd(cmd);
3735 vha->hw->tgt.num_qfull_cmds_alloc--;
3736 }
3737 }
3738 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3739 }
3740
3741 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3742 {
3743 uint32_t total_leaked;
3744
3745 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3746
3747 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3748 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3749
3750 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3751 "Chip reset due to exchange starvation: %d/%d.\n",
3752 total_leaked, vha->hw->cur_fw_xcb_count);
3753
3754 if (IS_P3P_TYPE(vha->hw))
3755 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3756 else
3757 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3758 qla2xxx_wake_dpc(vha);
3759 }
3760
3761 }
3762
3763 int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3764 {
3765 struct qla_tgt *tgt = cmd->tgt;
3766 struct scsi_qla_host *vha = tgt->vha;
3767 struct se_cmd *se_cmd = &cmd->se_cmd;
3768 unsigned long flags;
3769
3770 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3771 "qla_target(%d): terminating exchange for aborted cmd=%p "
3772 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3773 se_cmd->tag);
3774
3775 spin_lock_irqsave(&cmd->cmd_lock, flags);
3776 if (cmd->aborted) {
3777 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3778 /*
3779 * It's normal to see 2 calls in this path:
3780 * 1) XFER Rdy completion + CMD_T_ABORT
3781 * 2) TCM TMR - drain_state_list
3782 */
3783 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
3784 "multiple abort. %p transport_state %x, t_state %x, "
3785 "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
3786 cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
3787 return -EIO;
3788 }
3789 cmd->aborted = 1;
3790 cmd->trc_flags |= TRC_ABORT;
3791 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3792
3793 qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
3794 return 0;
3795 }
3796 EXPORT_SYMBOL(qlt_abort_cmd);
3797
3798 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3799 {
3800 struct fc_port *sess = cmd->sess;
3801
3802 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3803 "%s: se_cmd[%p] ox_id %04x\n",
3804 __func__, &cmd->se_cmd,
3805 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3806
3807 BUG_ON(cmd->cmd_in_wq);
3808
3809 if (cmd->sg_mapped)
3810 qlt_unmap_sg(cmd->vha, cmd);
3811
3812 if (!cmd->q_full)
3813 qlt_decr_num_pend_cmds(cmd->vha);
3814
3815 BUG_ON(cmd->sg_mapped);
3816 cmd->jiffies_at_free = get_jiffies_64();
3817 if (unlikely(cmd->free_sg))
3818 kfree(cmd->sg);
3819
3820 if (!sess || !sess->se_sess) {
3821 WARN_ON(1);
3822 return;
3823 }
3825 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
3826 }
3827 EXPORT_SYMBOL(qlt_free_cmd);
3828
3829 /*
3830  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3831 */
3832 static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
3833 struct qla_tgt_cmd *cmd, uint32_t status)
3834 {
3835 int term = 0;
3836 struct scsi_qla_host *vha = qpair->vha;
3837
3838 if (cmd->se_cmd.prot_op)
3839 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3840 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
3841 "se_cmd=%p tag[%x] op %#x/%s",
3842 cmd->lba, cmd->lba,
3843 cmd->num_blks, &cmd->se_cmd,
3844 cmd->atio.u.isp24.exchange_addr,
3845 cmd->se_cmd.prot_op,
3846 prot_op_str(cmd->se_cmd.prot_op));
3847
3848 if (ctio != NULL) {
3849 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3850
3851 term = !(c->flags &
3852 cpu_to_le16(OF_TERM_EXCH));
3853 } else
3854 term = 1;
3855
3856 if (term)
3857 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
3858
3859 return term;
3860 }
3861
3862
3863 /* ha->hardware_lock supposed to be held on entry */
3864 static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3865 struct rsp_que *rsp, uint32_t handle, void *ctio)
3866 {
3867 void *cmd = NULL;
3868 struct req_que *req;
3869 int qid = GET_QID(handle);
3870 uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
3871
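	/*
	 * A completion handle packs two values: the request-queue id
	 * (extracted by GET_QID() above) and an index into
	 * req->outstanding_cmds[] (the masked h used below).
	 */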
3872 if (unlikely(h == QLA_TGT_SKIP_HANDLE))
3873 return NULL;
3874
3875 if (qid == rsp->req->id) {
3876 req = rsp->req;
3877 } else if (vha->hw->req_q_map[qid]) {
3878 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
3879 "qla_target(%d): CTIO completion with different QID %d handle %x\n",
3880 vha->vp_idx, rsp->id, handle);
3881 req = vha->hw->req_q_map[qid];
3882 } else {
3883 return NULL;
3884 }
3885
3886 h &= QLA_CMD_HANDLE_MASK;
3887
3888 if (h != QLA_TGT_NULL_HANDLE) {
3889 if (unlikely(h >= req->num_outstanding_cmds)) {
3890 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3891 "qla_target(%d): Wrong handle %x received\n",
3892 vha->vp_idx, handle);
3893 return NULL;
3894 }
3895
3896 cmd = req->outstanding_cmds[h];
3897 if (unlikely(cmd == NULL)) {
3898 ql_dbg(ql_dbg_async, vha, 0xe053,
3899 "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
3900 vha->vp_idx, handle, req->id, rsp->id);
3901 return NULL;
3902 }
3903 req->outstanding_cmds[h] = NULL;
3904 } else if (ctio != NULL) {
3905 /* We can't get loop ID from CTIO7 */
3906 ql_dbg(ql_dbg_tgt, vha, 0xe054,
3907 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
3908 "support NULL handles\n", vha->vp_idx);
3909 return NULL;
3910 }
3911
3912 return cmd;
3913 }
3914
3915 /*
3916  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3917 */
3918 static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3919 struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
3920 {
3921 struct qla_hw_data *ha = vha->hw;
3922 struct se_cmd *se_cmd;
3923 struct qla_tgt_cmd *cmd;
3924 struct qla_qpair *qpair = rsp->qpair;
3925
3926 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3927 /* That could happen only in case of an error/reset/abort */
3928 if (status != CTIO_SUCCESS) {
3929 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3930 "Intermediate CTIO received"
3931 " (status %x)\n", status);
3932 }
3933 return;
3934 }
3935
3936 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
3937 if (cmd == NULL)
3938 return;
3939
3940 se_cmd = &cmd->se_cmd;
3941 cmd->cmd_sent_to_fw = 0;
3942
3943 qlt_unmap_sg(vha, cmd);
3944
3945 if (unlikely(status != CTIO_SUCCESS)) {
3946 switch (status & 0xFFFF) {
3947 case CTIO_INVALID_RX_ID:
3948 if (printk_ratelimit())
3949 dev_info(&vha->hw->pdev->dev,
3950 "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
3951 vha->vp_idx, cmd->atio.u.isp24.attr,
3952 ((cmd->ctio_flags >> 9) & 0xf),
3953 cmd->ctio_flags);
3954
3955 break;
3956 case CTIO_LIP_RESET:
3957 case CTIO_TARGET_RESET:
3958 case CTIO_ABORTED:
3959 		/* driver requested abort via Terminate exchange */
3960 case CTIO_TIMEOUT:
3961 /* They are OK */
3962 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3963 "qla_target(%d): CTIO with "
3964 "status %#x received, state %x, se_cmd %p, "
3965 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3966 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3967 status, cmd->state, se_cmd);
3968 break;
3969
3970 case CTIO_PORT_LOGGED_OUT:
3971 case CTIO_PORT_UNAVAILABLE:
3972 {
3973 int logged_out =
3974 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
3975
3976 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3977 "qla_target(%d): CTIO with %s status %x "
3978 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3979 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3980 status, cmd->state, se_cmd);
3981
3982 if (logged_out && cmd->sess) {
3983 /*
3984 * Session is already logged out, but we need
3985 				 * to notify the initiator, which is not aware of this
3986 */
3987 cmd->sess->send_els_logo = 1;
3988 ql_dbg(ql_dbg_disc, vha, 0x20f8,
3989 "%s %d %8phC post del sess\n",
3990 __func__, __LINE__, cmd->sess->port_name);
3991
3992 qlt_schedule_sess_for_deletion(cmd->sess);
3993 }
3994 break;
3995 }
3996 case CTIO_DIF_ERROR: {
3997 struct ctio_crc_from_fw *crc =
3998 (struct ctio_crc_from_fw *)ctio;
3999 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
4000 "qla_target(%d): CTIO with DIF_ERROR status %x "
4001 "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
4002 "expect_dif[0x%llx]\n",
4003 vha->vp_idx, status, cmd->state, se_cmd,
4004 *((u64 *)&crc->actual_dif[0]),
4005 *((u64 *)&crc->expected_dif[0]));
4006
4007 qlt_handle_dif_error(qpair, cmd, ctio);
4008 return;
4009 }
4010 default:
4011 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
4012 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
4013 vha->vp_idx, status, cmd->state, se_cmd);
4014 break;
4015 }
4016
4017
4018 /* "cmd->aborted" means
4019 * cmd is already aborted/terminated, we don't
4020 * need to terminate again. The exchange is already
4021 * cleaned up/freed at FW level. Just cleanup at driver
4022 * level.
4023 */
4024 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
4025 (!cmd->aborted)) {
4026 cmd->trc_flags |= TRC_CTIO_ERR;
4027 if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
4028 return;
4029 }
4030 }
4031
4032 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
4033 cmd->trc_flags |= TRC_CTIO_DONE;
4034 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
4035 cmd->state = QLA_TGT_STATE_DATA_IN;
4036
4037 if (status == CTIO_SUCCESS)
4038 cmd->write_data_transferred = 1;
4039
4040 ha->tgt.tgt_ops->handle_data(cmd);
4041 return;
4042 } else if (cmd->aborted) {
4043 cmd->trc_flags |= TRC_CTIO_ABORTED;
4044 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
4045 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
4046 } else {
4047 cmd->trc_flags |= TRC_CTIO_STRANGE;
4048 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
4049 "qla_target(%d): A command in state (%d) should "
4050 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
4051 }
4052
4053 if (unlikely(status != CTIO_SUCCESS) &&
4054 !cmd->aborted) {
4055 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
4056 dump_stack();
4057 }
4058
4059 ha->tgt.tgt_ops->free_cmd(cmd);
4060 }
4061
4062 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
4063 uint8_t task_codes)
4064 {
4065 int fcp_task_attr;
4066
4067 switch (task_codes) {
4068 case ATIO_SIMPLE_QUEUE:
4069 fcp_task_attr = TCM_SIMPLE_TAG;
4070 break;
4071 case ATIO_HEAD_OF_QUEUE:
4072 fcp_task_attr = TCM_HEAD_TAG;
4073 break;
4074 case ATIO_ORDERED_QUEUE:
4075 fcp_task_attr = TCM_ORDERED_TAG;
4076 break;
4077 case ATIO_ACA_QUEUE:
4078 fcp_task_attr = TCM_ACA_TAG;
4079 break;
4080 case ATIO_UNTAGGED:
4081 fcp_task_attr = TCM_SIMPLE_TAG;
4082 break;
4083 default:
4084 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
4085 "qla_target: unknown task code %x, use ORDERED instead\n",
4086 task_codes);
4087 fcp_task_attr = TCM_ORDERED_TAG;
4088 break;
4089 }
4090
4091 return fcp_task_attr;
4092 }
4093
4094 /*
4095 * Process context for I/O path into tcm_qla2xxx code
4096 */
4097 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
4098 {
4099 scsi_qla_host_t *vha = cmd->vha;
4100 struct qla_hw_data *ha = vha->hw;
4101 struct fc_port *sess = cmd->sess;
4102 struct atio_from_isp *atio = &cmd->atio;
4103 unsigned char *cdb;
4104 unsigned long flags;
4105 uint32_t data_length;
4106 int ret, fcp_task_attr, data_dir, bidi = 0;
4107 struct qla_qpair *qpair = cmd->qpair;
4108
4109 cmd->cmd_in_wq = 0;
4110 cmd->trc_flags |= TRC_DO_WORK;
4111
4112 if (cmd->aborted) {
4113 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
4114 "cmd with tag %u is aborted\n",
4115 cmd->atio.u.isp24.exchange_addr);
4116 goto out_term;
4117 }
4118
4119 spin_lock_init(&cmd->cmd_lock);
4120 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
4121 cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
4122
4123 if (atio->u.isp24.fcp_cmnd.rddata &&
4124 atio->u.isp24.fcp_cmnd.wrdata) {
4125 bidi = 1;
4126 data_dir = DMA_TO_DEVICE;
4127 } else if (atio->u.isp24.fcp_cmnd.rddata)
4128 data_dir = DMA_FROM_DEVICE;
4129 else if (atio->u.isp24.fcp_cmnd.wrdata)
4130 data_dir = DMA_TO_DEVICE;
4131 else
4132 data_dir = DMA_NONE;
4133
4134 fcp_task_attr = qlt_get_fcp_task_attr(vha,
4135 atio->u.isp24.fcp_cmnd.task_attr);
4136 data_length = get_datalen_for_atio(atio);
4137
4138 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
4139 fcp_task_attr, data_dir, bidi);
4140 if (ret != 0)
4141 goto out_term;
4142 /*
4143 * Drop extra session reference from qlt_handle_cmd_for_atio().
4144 */
4145 ha->tgt.tgt_ops->put_sess(sess);
4146 return;
4147
4148 out_term:
4149 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
4150 /*
4151 	 * cmd has not been sent to the target yet, so pass NULL as the second
4152 * argument to qlt_send_term_exchange() and free the memory here.
4153 */
4154 cmd->trc_flags |= TRC_DO_WORK_ERR;
4155 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
4156 qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
4157
4158 qlt_decr_num_pend_cmds(vha);
4159 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
4160 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
4161
4162 ha->tgt.tgt_ops->put_sess(sess);
4163 }
4164
4165 static void qlt_do_work(struct work_struct *work)
4166 {
4167 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4168 scsi_qla_host_t *vha = cmd->vha;
4169 unsigned long flags;
4170
4171 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4172 list_del(&cmd->cmd_list);
4173 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4174
4175 __qlt_do_work(cmd);
4176 }
4177
4178 void qlt_clr_qp_table(struct scsi_qla_host *vha)
4179 {
4180 unsigned long flags;
4181 struct qla_hw_data *ha = vha->hw;
4182 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4183 void *node;
4184 u64 key = 0;
4185
4186 ql_log(ql_log_info, vha, 0x706c,
4187 "User update Number of Active Qpairs %d\n",
4188 ha->tgt.num_act_qpairs);
4189
4190 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4191
4192 btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
4193 btree_remove64(&tgt->lun_qpair_map, key);
4194
4195 ha->base_qpair->lun_cnt = 0;
4196 for (key = 0; key < ha->max_qpairs; key++)
4197 if (ha->queue_pair_map[key])
4198 ha->queue_pair_map[key]->lun_cnt = 0;
4199
4200 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4201 }
4202
4203 static void qlt_assign_qpair(struct scsi_qla_host *vha,
4204 struct qla_tgt_cmd *cmd)
4205 {
4206 struct qla_qpair *qpair, *qp;
4207 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4208 struct qla_qpair_hint *h;
4209
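	/*
	 * LUN-to-qpair affinity: lun_qpair_map caches a qpair hint per LUN.
	 * On the first command for a LUN, pick the first qpair not yet
	 * serving any LUN, or failing that the one with the smallest
	 * lun_cnt, and insert the hint so later commands for that LUN stay
	 * on the same qpair.
	 */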
4210 if (vha->flags.qpairs_available) {
4211 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
4212 if (unlikely(!h)) {
4213 			/* spread the lun-to-qpair mapping evenly */
4214 int lcnt = 0, rc;
4215 struct scsi_qla_host *base_vha =
4216 pci_get_drvdata(vha->hw->pdev);
4217
4218 qpair = vha->hw->base_qpair;
4219 if (qpair->lun_cnt == 0) {
4220 qpair->lun_cnt++;
4221 h = qla_qpair_to_hint(tgt, qpair);
4222 BUG_ON(!h);
4223 rc = btree_insert64(&tgt->lun_qpair_map,
4224 cmd->unpacked_lun, h, GFP_ATOMIC);
4225 if (rc) {
4226 qpair->lun_cnt--;
4227 ql_log(ql_log_info, vha, 0xd037,
4228 "Unable to insert lun %llx into lun_qpair_map\n",
4229 cmd->unpacked_lun);
4230 }
4231 goto out;
4232 } else {
4233 lcnt = qpair->lun_cnt;
4234 }
4235
4236 h = NULL;
4237 list_for_each_entry(qp, &base_vha->qp_list,
4238 qp_list_elem) {
4239 if (qp->lun_cnt == 0) {
4240 qp->lun_cnt++;
4241 h = qla_qpair_to_hint(tgt, qp);
4242 BUG_ON(!h);
4243 rc = btree_insert64(&tgt->lun_qpair_map,
4244 cmd->unpacked_lun, h, GFP_ATOMIC);
4245 if (rc) {
4246 qp->lun_cnt--;
4247 ql_log(ql_log_info, vha, 0xd038,
4248 "Unable to insert lun %llx into lun_qpair_map\n",
4249 cmd->unpacked_lun);
4250 }
4251 qpair = qp;
4252 goto out;
4253 } else {
4254 if (qp->lun_cnt < lcnt) {
4255 lcnt = qp->lun_cnt;
4256 qpair = qp;
4257 continue;
4258 }
4259 }
4260 }
4261 BUG_ON(!qpair);
4262 qpair->lun_cnt++;
4263 h = qla_qpair_to_hint(tgt, qpair);
4264 BUG_ON(!h);
4265 rc = btree_insert64(&tgt->lun_qpair_map,
4266 cmd->unpacked_lun, h, GFP_ATOMIC);
4267 if (rc) {
4268 qpair->lun_cnt--;
4269 ql_log(ql_log_info, vha, 0xd039,
4270 "Unable to insert lun %llx into lun_qpair_map\n",
4271 cmd->unpacked_lun);
4272 }
4273 }
4274 } else {
4275 h = &tgt->qphints[0];
4276 }
4277 out:
4278 cmd->qpair = h->qpair;
4279 cmd->se_cmd.cpuid = h->cpuid;
4280 }
4281
4282 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4283 struct fc_port *sess,
4284 struct atio_from_isp *atio)
4285 {
4286 struct qla_tgt_cmd *cmd;
4287
4288 cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
4289 if (!cmd)
4290 return NULL;
4291
4292 cmd->cmd_type = TYPE_TGT_CMD;
4293 memcpy(&cmd->atio, atio, sizeof(*atio));
4294 cmd->state = QLA_TGT_STATE_NEW;
4295 cmd->tgt = vha->vha_tgt.qla_tgt;
4296 qlt_incr_num_pend_cmds(vha);
4297 cmd->vha = vha;
4298 cmd->sess = sess;
4299 cmd->loop_id = sess->loop_id;
4300 cmd->conf_compl_supported = sess->conf_compl_supported;
4301
4302 cmd->trc_flags = 0;
4303 cmd->jiffies_at_alloc = get_jiffies_64();
4304
4305 cmd->unpacked_lun = scsilun_to_int(
4306 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
4307 qlt_assign_qpair(vha, cmd);
4308 cmd->reset_count = vha->hw->base_qpair->chip_reset;
4309 cmd->vp_idx = vha->vp_idx;
4310
4311 return cmd;
4312 }
4313
4314 /* ha->hardware_lock supposed to be held on entry */
4315 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4316 struct atio_from_isp *atio)
4317 {
4318 struct qla_hw_data *ha = vha->hw;
4319 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4320 struct fc_port *sess;
4321 struct qla_tgt_cmd *cmd;
4322 unsigned long flags;
4323 port_id_t id;
4324
4325 if (unlikely(tgt->tgt_stop)) {
4326 ql_dbg(ql_dbg_io, vha, 0x3061,
4327 "New command while device %p is shutting down\n", tgt);
4328 return -ENODEV;
4329 }
4330
4331 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
4332 if (IS_SW_RESV_ADDR(id))
4333 return -EBUSY;
4334
4335 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
4336 if (unlikely(!sess))
4337 return -EFAULT;
4338
4339 /* Another WWN used to have our s_id. Our PLOGI scheduled its
4340 * session deletion, but it's still in sess_del_work wq */
4341 if (sess->deleted) {
4342 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
4343 "New command while old session %p is being deleted\n",
4344 sess);
4345 return -EFAULT;
4346 }
4347
4348 /*
4349 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
4350 */
4351 if (!kref_get_unless_zero(&sess->sess_kref)) {
4352 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
4353 "%s: kref_get fail, %8phC oxid %x \n",
4354 __func__, sess->port_name,
4355 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
4356 return -EFAULT;
4357 }
4358
4359 cmd = qlt_get_tag(vha, sess, atio);
4360 if (!cmd) {
4361 ql_dbg(ql_dbg_io, vha, 0x3062,
4362 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4363 ha->tgt.tgt_ops->put_sess(sess);
4364 return -EBUSY;
4365 }
4366
4367 cmd->cmd_in_wq = 1;
4368 cmd->trc_flags |= TRC_NEW_CMD;
4369
4370 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4371 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4372 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4373
4374 INIT_WORK(&cmd->work, qlt_do_work);
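	/*
	 * Choose the CPU for the work item: with qpairs, follow the qpair's
	 * CPU hint; with MSI-X but no qpairs, keep read (data-in) commands
	 * on the current CPU and route the rest to the hinted CPU;
	 * otherwise let the workqueue pick.
	 */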
4375 if (vha->flags.qpairs_available) {
4376 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
4377 } else if (ha->msix_count) {
4378 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
4379 queue_work_on(smp_processor_id(), qla_tgt_wq,
4380 &cmd->work);
4381 else
4382 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
4383 &cmd->work);
4384 } else {
4385 queue_work(qla_tgt_wq, &cmd->work);
4386 }
4387
4388 return 0;
4389 }
4390
4391 /* ha->hardware_lock supposed to be held on entry */
4392 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
4393 int fn, void *iocb, int flags)
4394 {
4395 struct scsi_qla_host *vha = sess->vha;
4396 struct qla_hw_data *ha = vha->hw;
4397 struct qla_tgt_mgmt_cmd *mcmd;
4398 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4399 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
4400
4401 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4402 if (!mcmd) {
4403 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4404 "qla_target(%d): Allocation of management "
4405 "command failed, some commands and their data could "
4406 "leak\n", vha->vp_idx);
4407 return -ENOMEM;
4408 }
4409 memset(mcmd, 0, sizeof(*mcmd));
4410 mcmd->sess = sess;
4411
4412 if (iocb) {
4413 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4414 sizeof(mcmd->orig_iocb.imm_ntfy));
4415 }
4416 mcmd->tmr_func = fn;
4417 mcmd->flags = flags;
4418 mcmd->reset_count = ha->base_qpair->chip_reset;
4419 mcmd->qpair = h->qpair;
4420 mcmd->vha = vha;
4421 mcmd->se_cmd.cpuid = h->cpuid;
4422 mcmd->unpacked_lun = lun;
4423
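	/*
	 * LUN-scoped functions below first abort this LUN's queued commands
	 * (except CLEAR_ACA) and are then routed to the qpair serving the
	 * LUN via qlt_find_qphint(); target- and nexus-wide functions keep
	 * the default qpair hint.
	 */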
4424 switch (fn) {
4425 case QLA_TGT_LUN_RESET:
4426 case QLA_TGT_CLEAR_TS:
4427 case QLA_TGT_ABORT_TS:
4428 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4429 fallthrough;
4430 case QLA_TGT_CLEAR_ACA:
4431 h = qlt_find_qphint(vha, mcmd->unpacked_lun);
4432 mcmd->qpair = h->qpair;
4433 mcmd->se_cmd.cpuid = h->cpuid;
4434 break;
4435
4436 case QLA_TGT_TARGET_RESET:
4437 case QLA_TGT_NEXUS_LOSS_SESS:
4438 case QLA_TGT_NEXUS_LOSS:
4439 case QLA_TGT_ABORT_ALL:
4440 default:
4441 /* no-op */
4442 break;
4443 }
4444
4445 INIT_WORK(&mcmd->work, qlt_do_tmr_work);
4446 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
4447 &mcmd->work);
4448
4449 return 0;
4450 }
4451
4452 /* ha->hardware_lock supposed to be held on entry */
4453 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4454 {
4455 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4456 struct qla_hw_data *ha = vha->hw;
4457 struct fc_port *sess;
4458 u64 unpacked_lun;
4459 int fn;
4460 unsigned long flags;
4461
4462 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4463
4464 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4465 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4466 a->u.isp24.fcp_hdr.s_id);
4467 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4468
4469 unpacked_lun =
4470 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4471
4472 if (sess == NULL || sess->deleted)
4473 return -EFAULT;
4474
4475 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4476 }
4477
4478 /* ha->hardware_lock supposed to be held on entry */
4479 static int __qlt_abort_task(struct scsi_qla_host *vha,
4480 struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
4481 {
4482 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4483 struct qla_hw_data *ha = vha->hw;
4484 struct qla_tgt_mgmt_cmd *mcmd;
4485 u64 unpacked_lun;
4486 int rc;
4487
4488 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4489 if (mcmd == NULL) {
4490 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4491 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4492 vha->vp_idx, __func__);
4493 return -ENOMEM;
4494 }
4495 memset(mcmd, 0, sizeof(*mcmd));
4496
4497 mcmd->sess = sess;
4498 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4499 sizeof(mcmd->orig_iocb.imm_ntfy));
4500
4501 unpacked_lun =
4502 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4503 mcmd->reset_count = ha->base_qpair->chip_reset;
4504 mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
4505 mcmd->qpair = ha->base_qpair;
4506
4507 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
4508 le16_to_cpu(iocb->u.isp2x.seq_id));
4509 if (rc != 0) {
4510 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4511 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4512 vha->vp_idx, rc);
4513 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4514 return -EFAULT;
4515 }
4516
4517 return 0;
4518 }
4519
4520 /* ha->hardware_lock supposed to be held on entry */
4521 static int qlt_abort_task(struct scsi_qla_host *vha,
4522 struct imm_ntfy_from_isp *iocb)
4523 {
4524 struct qla_hw_data *ha = vha->hw;
4525 struct fc_port *sess;
4526 int loop_id;
4527 unsigned long flags;
4528
4529 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4530
4531 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4532 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4533 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4534
4535 if (sess == NULL) {
4536 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4537 "qla_target(%d): task abort for unexisting "
4538 "session\n", vha->vp_idx);
4539 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4540 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4541 }
4542
4543 return __qlt_abort_task(vha, iocb, sess);
4544 }
4545
4546 void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4547 {
4548 if (rc != MBS_COMMAND_COMPLETE) {
4549 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4550 "%s: se_sess %p / sess %p from"
4551 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4552 " LOGO failed: %#x\n",
4553 __func__,
4554 fcport->se_sess,
4555 fcport,
4556 fcport->port_name, fcport->loop_id,
4557 fcport->d_id.b.domain, fcport->d_id.b.area,
4558 fcport->d_id.b.al_pa, rc);
4559 }
4560
4561 fcport->logout_completed = 1;
4562 }
4563
4564 /*
4565 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4566 *
4567 * Schedules sessions with matching port_id/loop_id but different wwn for
4568 * deletion. Returns existing session with matching wwn if present.
4569 * Null otherwise.
4570 */
4571 struct fc_port *
4572 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4573 port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
4574 {
4575 struct fc_port *sess = NULL, *other_sess;
4576 uint64_t other_wwn;
4577
4578 *conflict_sess = NULL;
4579
4580 list_for_each_entry(other_sess, &vha->vp_fcports, list) {
4581
4582 other_wwn = wwn_to_u64(other_sess->port_name);
4583
4584 if (wwn == other_wwn) {
4585 WARN_ON(sess);
4586 sess = other_sess;
4587 continue;
4588 }
4589
4590 /* find other sess with nport_id collision */
4591 if (port_id.b24 == other_sess->d_id.b24) {
4592 if (loop_id != other_sess->loop_id) {
4593 ql_dbg(ql_dbg_disc, vha, 0x1000c,
4594 "Invalidating sess %p loop_id %d wwn %llx.\n",
4595 other_sess, other_sess->loop_id, other_wwn);
4596
4597 /*
4598 * logout_on_delete is set by default, but another
4599 * session that has the same s_id/loop_id combo
4600 				 * might have cleared it when it requested this
4601 				 * session's deletion, so don't touch it
4602 */
4603 qlt_schedule_sess_for_deletion(other_sess);
4604 } else {
4605 /*
4606 				 * Another wwn used to have our s_id/loop_id;
4607 				 * kill the session, but don't free the loop_id
4608 */
4609 ql_dbg(ql_dbg_disc, vha, 0xf01b,
4610 "Invalidating sess %p loop_id %d wwn %llx.\n",
4611 other_sess, other_sess->loop_id, other_wwn);
4612
4613 other_sess->keep_nport_handle = 1;
4614 if (other_sess->disc_state != DSC_DELETED)
4615 *conflict_sess = other_sess;
4616 qlt_schedule_sess_for_deletion(other_sess);
4617 }
4618 continue;
4619 }
4620
4621 /* find other sess with nport handle collision */
4622 if ((loop_id == other_sess->loop_id) &&
4623 (loop_id != FC_NO_LOOP_ID)) {
4624 ql_dbg(ql_dbg_disc, vha, 0x1000d,
4625 "Invalidating sess %p loop_id %d wwn %llx.\n",
4626 other_sess, other_sess->loop_id, other_wwn);
4627
4628 			/* Same loop_id but different s_id;
4629 			 * OK to kill and log out */
4630 qlt_schedule_sess_for_deletion(other_sess);
4631 }
4632 }
4633
4634 return sess;
4635 }
4636
4637 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4638 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4639 {
4640 struct qla_tgt_sess_op *op;
4641 struct qla_tgt_cmd *cmd;
4642 uint32_t key;
4643 int count = 0;
4644 unsigned long flags;
4645
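	/*
	 * Pack the 24-bit FC source id (domain:area:al_pa) into one u32 so
	 * it can be compared with sid_to_key() of each queued command's
	 * s_id.
	 */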
4646 key = (((u32)s_id->b.domain << 16) |
4647 ((u32)s_id->b.area << 8) |
4648 ((u32)s_id->b.al_pa));
4649
4650 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4651 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4652 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4653
4654 if (op_key == key) {
4655 op->aborted = true;
4656 count++;
4657 }
4658 }
4659
4660 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4661 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4662
4663 if (op_key == key) {
4664 op->aborted = true;
4665 count++;
4666 }
4667 }
4668
4669 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4670 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4671
4672 if (cmd_key == key) {
4673 cmd->aborted = 1;
4674 count++;
4675 }
4676 }
4677 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4678
4679 return count;
4680 }
4681
4682 static int qlt_handle_login(struct scsi_qla_host *vha,
4683 struct imm_ntfy_from_isp *iocb)
4684 {
4685 struct fc_port *sess = NULL, *conflict_sess = NULL;
4686 uint64_t wwn;
4687 port_id_t port_id;
4688 uint16_t loop_id, wd3_lo;
4689 int res = 0;
4690 struct qlt_plogi_ack_t *pla;
4691 unsigned long flags;
4692
4693 lockdep_assert_held(&vha->hw->hardware_lock);
4694
4695 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4696
4697 port_id.b.domain = iocb->u.isp24.port_id[2];
4698 port_id.b.area = iocb->u.isp24.port_id[1];
4699 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4700 port_id.b.rsvd_1 = 0;
4701
4702 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4703
4704 /* Mark all stale commands sitting in qla_tgt_wq for deletion */
4705 abort_cmds_for_s_id(vha, &port_id);
4706
4707 if (wwn) {
4708 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4709 sess = qlt_find_sess_invalidate_other(vha, wwn,
4710 port_id, loop_id, &conflict_sess);
4711 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4712 } else {
4713 ql_dbg(ql_dbg_disc, vha, 0xffff,
4714 "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
4715 __func__, __LINE__, loop_id, port_id.b24);
4716 qlt_send_term_imm_notif(vha, iocb, 1);
4717 goto out;
4718 }
4719
4720 if (IS_SW_RESV_ADDR(port_id)) {
4721 res = 1;
4722 goto out;
4723 }
4724
4725 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4726 if (!pla) {
4727 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4728 "%s %d %8phC Term INOT due to mem alloc fail",
4729 __func__, __LINE__,
4730 iocb->u.isp24.port_name);
4731 qlt_send_term_imm_notif(vha, iocb, 1);
4732 goto out;
4733 }
4734
4735 if (conflict_sess) {
4736 conflict_sess->login_gen++;
4737 qlt_plogi_ack_link(vha, pla, conflict_sess,
4738 QLT_PLOGI_LINK_CONFLICT);
4739 }
4740
4741 if (!sess) {
4742 pla->ref_count++;
4743 ql_dbg(ql_dbg_disc, vha, 0xffff,
4744 "%s %d %8phC post new sess\n",
4745 __func__, __LINE__, iocb->u.isp24.port_name);
4746 if (iocb->u.isp24.status_subcode == ELS_PLOGI)
4747 qla24xx_post_newsess_work(vha, &port_id,
4748 iocb->u.isp24.port_name,
4749 iocb->u.isp24.u.plogi.node_name,
4750 pla, 0);
4751 else
4752 qla24xx_post_newsess_work(vha, &port_id,
4753 iocb->u.isp24.port_name, NULL,
4754 pla, 0);
4755
4756 goto out;
4757 }
4758
4759 if (sess->disc_state == DSC_UPD_FCPORT) {
4760 u16 sec;
4761
4762 /*
4763 * Remote port registration is still going on from
4764 * previous login. Allow it to finish before we
4765 * accept the new login.
4766 */
4767 sess->next_disc_state = DSC_DELETE_PEND;
4768 sec = jiffies_to_msecs(jiffies -
4769 sess->jiffies_at_registration) / 1000;
4770 if (sess->sec_since_registration < sec && sec &&
4771 !(sec % 5)) {
4772 sess->sec_since_registration = sec;
4773 ql_dbg(ql_dbg_disc, vha, 0xffff,
4774 "%s %8phC - Slow Rport registration (%d Sec)\n",
4775 __func__, sess->port_name, sec);
4776 }
4777
4778 if (!conflict_sess) {
4779 list_del(&pla->list);
4780 kmem_cache_free(qla_tgt_plogi_cachep, pla);
4781 }
4782
4783 qlt_send_term_imm_notif(vha, iocb, 1);
4784 goto out;
4785 }
4786
4787 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4788 sess->d_id = port_id;
4789 sess->login_gen++;
4790
4791 if (iocb->u.isp24.status_subcode == ELS_PRLI) {
4792 sess->fw_login_state = DSC_LS_PRLI_PEND;
4793 sess->local = 0;
4794 sess->loop_id = loop_id;
4795 sess->d_id = port_id;
4796 sess->fw_login_state = DSC_LS_PRLI_PEND;
4797 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4798
4799 if (wd3_lo & BIT_7)
4800 sess->conf_compl_supported = 1;
4801
4802 if ((wd3_lo & BIT_4) == 0)
4803 sess->port_type = FCT_INITIATOR;
4804 else
4805 sess->port_type = FCT_TARGET;
4806
4807 } else
4808 sess->fw_login_state = DSC_LS_PLOGI_PEND;
4809
4810
4811 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4812 "%s %d %8phC DS %d\n",
4813 __func__, __LINE__, sess->port_name, sess->disc_state);
4814
4815 switch (sess->disc_state) {
4816 case DSC_DELETED:
4817 case DSC_LOGIN_PEND:
4818 qlt_plogi_ack_unref(vha, pla);
4819 break;
4820
4821 default:
4822 /*
4823 * Under normal circumstances we want to release nport handle
4824 * during LOGO process to avoid nport handle leaks inside FW.
4825 * The exception is when LOGO is done while another PLOGI with
4826 * the same nport handle is waiting as might be the case here.
4827 		 * Note: there is always a possibility of a race where session
4828 * deletion has already started for other reasons (e.g. ACL
4829 * removal) and now PLOGI arrives:
4830 * 1. if PLOGI arrived in FW after nport handle has been freed,
4831 * FW must have assigned this PLOGI a new/same handle and we
4832 * can proceed ACK'ing it as usual when session deletion
4833 * completes.
4834 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4835 * bit reached it, the handle has now been released. We'll
4836 * get an error when we ACK this PLOGI. Nothing will be sent
4837 * back to initiator. Initiator should eventually retry
4838 * PLOGI and situation will correct itself.
4839 */
4840 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4841 (sess->d_id.b24 == port_id.b24));
4842
4843 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4844 "%s %d %8phC post del sess\n",
4845 __func__, __LINE__, sess->port_name);
4846
4847
4848 qlt_schedule_sess_for_deletion(sess);
4849 break;
4850 }
4851 out:
4852 return res;
4853 }
4854
4855 /*
4856 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
4857 */
4858 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4859 struct imm_ntfy_from_isp *iocb)
4860 {
4861 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4862 struct qla_hw_data *ha = vha->hw;
4863 struct fc_port *sess = NULL, *conflict_sess = NULL;
4864 uint64_t wwn;
4865 port_id_t port_id;
4866 uint16_t loop_id;
4867 uint16_t wd3_lo;
4868 int res = 0;
4869 unsigned long flags;
4870
4871 lockdep_assert_held(&ha->hardware_lock);
4872
4873 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4874
4875 port_id.b.domain = iocb->u.isp24.port_id[2];
4876 port_id.b.area = iocb->u.isp24.port_id[1];
4877 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4878 port_id.b.rsvd_1 = 0;
4879
4880 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4881
4882 ql_dbg(ql_dbg_disc, vha, 0xf026,
4883 "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
4884 vha->vp_idx, iocb->u.isp24.port_id[2],
4885 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
4886 iocb->u.isp24.status_subcode, loop_id,
4887 iocb->u.isp24.port_name);
4888
4889 	/* res = 1 means ack at the end of the thread;
4890 	 * res = 0 means ack async/later.
4891 */
4892 switch (iocb->u.isp24.status_subcode) {
4893 case ELS_PLOGI:
4894 res = qlt_handle_login(vha, iocb);
4895 break;
4896
4897 case ELS_PRLI:
4898 if (N2N_TOPO(ha)) {
4899 sess = qla2x00_find_fcport_by_wwpn(vha,
4900 iocb->u.isp24.port_name, 1);
4901
4902 if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
4903 ql_dbg(ql_dbg_disc, vha, 0xffff,
4904 "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
4905 __func__, __LINE__,
4906 iocb->u.isp24.port_name);
4907 qlt_send_term_imm_notif(vha, iocb, 1);
4908 break;
4909 }
4910
4911 res = qlt_handle_login(vha, iocb);
4912 break;
4913 }
4914
4915 if (IS_SW_RESV_ADDR(port_id)) {
4916 res = 1;
4917 break;
4918 }
4919
4920 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4921
4922 if (wwn) {
4923 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4924 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
4925 loop_id, &conflict_sess);
4926 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4927 }
4928
4929 if (conflict_sess) {
4930 switch (conflict_sess->disc_state) {
4931 case DSC_DELETED:
4932 case DSC_DELETE_PEND:
4933 break;
4934 default:
4935 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4936 "PRLI with conflicting sess %p port %8phC\n",
4937 conflict_sess, conflict_sess->port_name);
4938 conflict_sess->fw_login_state =
4939 DSC_LS_PORT_UNAVAIL;
4940 qlt_send_term_imm_notif(vha, iocb, 1);
4941 res = 0;
4942 break;
4943 }
4944 }
4945
4946 if (sess != NULL) {
4947 bool delete = false;
4948 int sec;
4949
4950 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4951 switch (sess->fw_login_state) {
4952 case DSC_LS_PLOGI_PEND:
4953 case DSC_LS_PLOGI_COMP:
4954 case DSC_LS_PRLI_COMP:
4955 break;
4956 default:
4957 delete = true;
4958 break;
4959 }
4960
4961 switch (sess->disc_state) {
4962 case DSC_UPD_FCPORT:
4963 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4964 flags);
4965
4966 sec = jiffies_to_msecs(jiffies -
4967 sess->jiffies_at_registration)/1000;
4968 if (sess->sec_since_registration < sec && sec &&
4969 !(sec % 5)) {
4970 sess->sec_since_registration = sec;
4971 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
4972 "%s %8phC : Slow Rport registration(%d Sec)\n",
4973 __func__, sess->port_name, sec);
4974 }
4975 qlt_send_term_imm_notif(vha, iocb, 1);
4976 return 0;
4977
4978 case DSC_LOGIN_PEND:
4979 case DSC_GPDB:
4980 case DSC_LOGIN_COMPLETE:
4981 case DSC_ADISC:
4982 delete = false;
4983 break;
4984 default:
4985 break;
4986 }
4987
4988 if (delete) {
4989 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4990 flags);
4991 /*
4992 				 * Impatient initiator sent PRLI before the last
4993 				 * PLOGI could finish. Force it to retry while
4994 				 * the last one finishes.
4995 */
4996 ql_log(ql_log_warn, sess->vha, 0xf095,
4997 "sess %p PRLI received, before plogi ack.\n",
4998 sess);
4999 qlt_send_term_imm_notif(vha, iocb, 1);
5000 res = 0;
5001 break;
5002 }
5003
5004 /*
5005 * This shouldn't happen under normal circumstances,
5006 * since we have deleted the old session during PLOGI
5007 */
5008 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
5009 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
5010 sess->loop_id, sess, iocb->u.isp24.nport_handle);
5011
5012 sess->local = 0;
5013 sess->loop_id = loop_id;
5014 sess->d_id = port_id;
5015 sess->fw_login_state = DSC_LS_PRLI_PEND;
5016
5017 if (wd3_lo & BIT_7)
5018 sess->conf_compl_supported = 1;
5019
5020 if ((wd3_lo & BIT_4) == 0)
5021 sess->port_type = FCT_INITIATOR;
5022 else
5023 sess->port_type = FCT_TARGET;
5024
5025 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
5026 }
5027 res = 1; /* send notify ack */
5028
5029 /* Make session global (not used in fabric mode) */
5030 if (ha->current_topology != ISP_CFG_F) {
5031 if (sess) {
5032 ql_dbg(ql_dbg_disc, vha, 0x20fa,
5033 "%s %d %8phC post nack\n",
5034 __func__, __LINE__, sess->port_name);
5035 qla24xx_post_nack_work(vha, sess, iocb,
5036 SRB_NACK_PRLI);
5037 res = 0;
5038 } else {
5039 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5040 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5041 qla2xxx_wake_dpc(vha);
5042 }
5043 } else {
5044 if (sess) {
5045 ql_dbg(ql_dbg_disc, vha, 0x20fb,
5046 "%s %d %8phC post nack\n",
5047 __func__, __LINE__, sess->port_name);
5048 qla24xx_post_nack_work(vha, sess, iocb,
5049 SRB_NACK_PRLI);
5050 res = 0;
5051 }
5052 }
5053 break;
5054
5055 case ELS_TPRLO:
5056 if (le16_to_cpu(iocb->u.isp24.flags) &
5057 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
5058 loop_id = 0xFFFF;
5059 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
5060 res = 1;
5061 break;
5062 }
5063 fallthrough;
5064 case ELS_LOGO:
5065 case ELS_PRLO:
5066 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5067 sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
5068 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5069
5070 if (sess) {
5071 sess->login_gen++;
5072 sess->fw_login_state = DSC_LS_LOGO_PEND;
5073 sess->logo_ack_needed = 1;
5074 memcpy(sess->iocb, iocb, IOCB_SIZE);
5075 }
5076
5077 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5078
5079 ql_dbg(ql_dbg_disc, vha, 0x20fc,
5080 "%s: logo %llx res %d sess %p ",
5081 __func__, wwn, res, sess);
5082 if (res == 0) {
5083 /*
5084 			 * cmd went to the upper layer; see qlt_xmit_tm_rsp()
5085 			 * for the LOGO ACK & session deletion
5086 */
5087 BUG_ON(!sess);
5088 res = 0;
5089 } else {
5090 /* cmd did not go to upper layer. */
5091 if (sess) {
5092 qlt_schedule_sess_for_deletion(sess);
5093 res = 0;
5094 }
5095 			/* else the LOGO will be ACKed */
5096 }
5097 break;
5098 case ELS_PDISC:
5099 case ELS_ADISC:
5100 {
5101 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5102
5103 if (tgt->link_reinit_iocb_pending) {
5104 qlt_send_notify_ack(ha->base_qpair,
5105 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5106 tgt->link_reinit_iocb_pending = 0;
5107 }
5108
5109 sess = qla2x00_find_fcport_by_wwpn(vha,
5110 iocb->u.isp24.port_name, 1);
5111 if (sess) {
5112 ql_dbg(ql_dbg_disc, vha, 0x20fd,
5113 "sess %p lid %d|%d DS %d LS %d\n",
5114 sess, sess->loop_id, loop_id,
5115 sess->disc_state, sess->fw_login_state);
5116 }
5117
5118 res = 1; /* send notify ack */
5119 break;
5120 }
5121
5122 case ELS_FLOGI: /* should never happen */
5123 default:
5124 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
5125 "qla_target(%d): Unsupported ELS command %x "
5126 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
5127 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5128 break;
5129 }
5130
5131 ql_dbg(ql_dbg_disc, vha, 0xf026,
5132 "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
5133 vha->vp_idx, iocb->u.isp24.status_subcode, res);
5134
5135 return res;
5136 }
5137
5138 /*
5139  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5140 */
5141 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5142 struct imm_ntfy_from_isp *iocb)
5143 {
5144 struct qla_hw_data *ha = vha->hw;
5145 uint32_t add_flags = 0;
5146 int send_notify_ack = 1;
5147 uint16_t status;
5148
5149 lockdep_assert_held(&ha->hardware_lock);
5150
5151 status = le16_to_cpu(iocb->u.isp2x.status);
5152 switch (status) {
5153 case IMM_NTFY_LIP_RESET:
5154 {
5155 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5156 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
5157 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5158 iocb->u.isp24.status_subcode);
5159
5160 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5161 send_notify_ack = 0;
5162 break;
5163 }
5164
5165 case IMM_NTFY_LIP_LINK_REINIT:
5166 {
5167 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5168
5169 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5170 "qla_target(%d): LINK REINIT (loop %#x, "
5171 "subcode %x)\n", vha->vp_idx,
5172 le16_to_cpu(iocb->u.isp24.nport_handle),
5173 iocb->u.isp24.status_subcode);
5174 if (tgt->link_reinit_iocb_pending) {
5175 qlt_send_notify_ack(ha->base_qpair,
5176 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5177 }
5178 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
5179 tgt->link_reinit_iocb_pending = 1;
5180 /*
5181 		 * QLogic requires waiting after LINK REINIT for possible
5182 * PDISC or ADISC ELS commands
5183 */
5184 send_notify_ack = 0;
5185 break;
5186 }
5187
5188 case IMM_NTFY_PORT_LOGOUT:
5189 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5190 "qla_target(%d): Port logout (loop "
5191 "%#x, subcode %x)\n", vha->vp_idx,
5192 le16_to_cpu(iocb->u.isp24.nport_handle),
5193 iocb->u.isp24.status_subcode);
5194
5195 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5196 send_notify_ack = 0;
5197 /* The sessions will be cleared in the callback, if needed */
5198 break;
5199
5200 case IMM_NTFY_GLBL_TPRLO:
5201 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5202 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5203 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5204 send_notify_ack = 0;
5205 /* The sessions will be cleared in the callback, if needed */
5206 break;
5207
5208 case IMM_NTFY_PORT_CONFIG:
5209 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5210 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5211 status);
5212 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5213 send_notify_ack = 0;
5214 /* The sessions will be cleared in the callback, if needed */
5215 break;
5216
5217 case IMM_NTFY_GLBL_LOGO:
5218 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5219 "qla_target(%d): Link failure detected\n",
5220 vha->vp_idx);
5221 /* I_T nexus loss */
5222 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5223 send_notify_ack = 0;
5224 break;
5225
5226 case IMM_NTFY_IOCB_OVERFLOW:
5227 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5228 "qla_target(%d): Cannot provide requested "
5229 "capability (IOCB overflowed the immediate notify "
5230 "resource count)\n", vha->vp_idx);
5231 break;
5232
5233 case IMM_NTFY_ABORT_TASK:
5234 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5235 "qla_target(%d): Abort Task (S %08x I %#x -> "
5236 "L %#x)\n", vha->vp_idx,
5237 le16_to_cpu(iocb->u.isp2x.seq_id),
5238 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
5239 le16_to_cpu(iocb->u.isp2x.lun));
5240 if (qlt_abort_task(vha, iocb) == 0)
5241 send_notify_ack = 0;
5242 break;
5243
5244 case IMM_NTFY_RESOURCE:
5245 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5246 "qla_target(%d): Out of resources, host %ld\n",
5247 vha->vp_idx, vha->host_no);
5248 break;
5249
5250 case IMM_NTFY_MSG_RX:
5251 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5252 "qla_target(%d): Immediate notify task %x\n",
5253 vha->vp_idx, iocb->u.isp2x.task_flags);
5254 break;
5255
5256 case IMM_NTFY_ELS:
5257 if (qlt_24xx_handle_els(vha, iocb) == 0)
5258 send_notify_ack = 0;
5259 break;
5260 default:
5261 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5262 "qla_target(%d): Received unknown immediate "
5263 "notify status %x\n", vha->vp_idx, status);
5264 break;
5265 }
5266
5267 if (send_notify_ack)
5268 qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
5269 0, 0);
5270 }
5271
5272 /*
5273  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5274 * This function sends busy to ISP 2xxx or 24xx.
5275 */
5276 static int __qlt_send_busy(struct qla_qpair *qpair,
5277 struct atio_from_isp *atio, uint16_t status)
5278 {
5279 struct scsi_qla_host *vha = qpair->vha;
5280 struct ctio7_to_24xx *ctio24;
5281 struct qla_hw_data *ha = vha->hw;
5282 request_t *pkt;
5283 struct fc_port *sess = NULL;
5284 unsigned long flags;
5285 u16 temp;
5286 port_id_t id;
5287
5288 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
5289
5290 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5291 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
5292 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5293 if (!sess) {
5294 qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
5295 return 0;
5296 }
5297 	/* Sending a marker isn't necessary, since we are called from the ISR */
5298
5299 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
5300 if (!pkt) {
5301 ql_dbg(ql_dbg_io, vha, 0x3063,
5302 "qla_target(%d): %s failed: unable to allocate "
5303 "request packet", vha->vp_idx, __func__);
5304 return -ENOMEM;
5305 }
5306
5307 qpair->tgt_counters.num_q_full_sent++;
5308 pkt->entry_count = 1;
5309 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
5310
5311 ctio24 = (struct ctio7_to_24xx *)pkt;
5312 ctio24->entry_type = CTIO_TYPE7;
5313 ctio24->nport_handle = cpu_to_le16(sess->loop_id);
5314 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5315 ctio24->vp_index = vha->vp_idx;
5316 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
5317 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5318 temp = (atio->u.isp24.attr << 9) |
5319 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5320 CTIO7_FLAGS_DONT_RET_CTIO;
5321 ctio24->u.status1.flags = cpu_to_le16(temp);
5322 /*
5323 	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it
5324 	 * if explicit confirmation is used.
5325 */
5326 ctio24->u.status1.ox_id =
5327 cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
5328 ctio24->u.status1.scsi_status = cpu_to_le16(status);
5329
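	/*
	 * No data is moved for a busy/task-set-full response, so report the
	 * whole expected transfer length back as an underrun residual.
	 */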
5330 ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
5331
5332 if (ctio24->u.status1.residual != 0)
5333 ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);
5334
5335 /* Memory Barrier */
5336 wmb();
5337 if (qpair->reqq_start_iocbs)
5338 qpair->reqq_start_iocbs(qpair);
5339 else
5340 qla2x00_start_iocbs(vha, qpair->req);
5341 return 0;
5342 }
5343
5344 /*
5345  * This routine allocates a command either for a QFull condition
5346  * (i.e. to reply SAM_STAT_BUSY) or to terminate an exchange that
5347  * did not go out previously.
5348 */
5349 static void
5350 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5351 struct atio_from_isp *atio, uint16_t status, int qfull)
5352 {
5353 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5354 struct qla_hw_data *ha = vha->hw;
5355 struct fc_port *sess;
5356 struct qla_tgt_cmd *cmd;
5357 unsigned long flags;
5358
5359 if (unlikely(tgt->tgt_stop)) {
5360 ql_dbg(ql_dbg_io, vha, 0x300a,
5361 "New command while device %p is shutting down\n", tgt);
5362 return;
5363 }
5364
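/*
* Cap how many qfull commands may be outstanding at once; beyond
* MAX_QFULL_CMDS_ALLOC the ATIO is dropped and only the drop
* statistics are updated.
*/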
5365 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5366 vha->hw->tgt.num_qfull_cmds_dropped++;
5367 if (vha->hw->tgt.num_qfull_cmds_dropped >
5368 vha->qla_stats.stat_max_qfull_cmds_dropped)
5369 vha->qla_stats.stat_max_qfull_cmds_dropped =
5370 vha->hw->tgt.num_qfull_cmds_dropped;
5371
5372 ql_dbg(ql_dbg_io, vha, 0x3068,
5373 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
5374 vha->vp_idx, __func__,
5375 vha->hw->tgt.num_qfull_cmds_dropped);
5376
5377 qlt_chk_exch_leak_thresh_hold(vha);
5378 return;
5379 }
5380
5381 sess = ha->tgt.tgt_ops->find_sess_by_s_id
5382 (vha, atio->u.isp24.fcp_hdr.s_id);
5383 if (!sess)
5384 return;
5385
5386 cmd = ha->tgt.tgt_ops->get_cmd(sess);
5387 if (!cmd) {
5388 ql_dbg(ql_dbg_io, vha, 0x3009,
5389 "qla_target(%d): %s: Allocation of cmd failed\n",
5390 vha->vp_idx, __func__);
5391
5392 vha->hw->tgt.num_qfull_cmds_dropped++;
5393 if (vha->hw->tgt.num_qfull_cmds_dropped >
5394 vha->qla_stats.stat_max_qfull_cmds_dropped)
5395 vha->qla_stats.stat_max_qfull_cmds_dropped =
5396 vha->hw->tgt.num_qfull_cmds_dropped;
5397
5398 qlt_chk_exch_leak_thresh_hold(vha);
5399 return;
5400 }
5401
5402 qlt_incr_num_pend_cmds(vha);
5403 INIT_LIST_HEAD(&cmd->cmd_list);
5404 memcpy(&cmd->atio, atio, sizeof(*atio));
5405
5406 cmd->tgt = vha->vha_tgt.qla_tgt;
5407 cmd->vha = vha;
5408 cmd->reset_count = ha->base_qpair->chip_reset;
5410 cmd->qpair = ha->base_qpair;
5411
5412 if (qfull) {
5413 cmd->q_full = 1;
5414 /* NOTE: borrowing the state field to carry the status */
5415 cmd->state = status;
5416 } else
5417 cmd->term_exchg = 1;
5418
5419 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5420 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5421
5422 vha->hw->tgt.num_qfull_cmds_alloc++;
5423 if (vha->hw->tgt.num_qfull_cmds_alloc >
5424 vha->qla_stats.stat_max_qfull_cmds_alloc)
5425 vha->qla_stats.stat_max_qfull_cmds_alloc =
5426 vha->hw->tgt.num_qfull_cmds_alloc;
5427 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5428 }
5429
5430 int
5431 qlt_free_qfull_cmds(struct qla_qpair *qpair)
5432 {
5433 struct scsi_qla_host *vha = qpair->vha;
5434 struct qla_hw_data *ha = vha->hw;
5435 unsigned long flags;
5436 struct qla_tgt_cmd *cmd, *tcmd;
5437 struct list_head free_list, q_full_list;
5438 int rc = 0;
5439
5440 if (list_empty(&ha->tgt.q_full_list))
5441 return 0;
5442
5443 INIT_LIST_HEAD(&free_list);
5444 INIT_LIST_HEAD(&q_full_list);
5445
5446 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5447 if (list_empty(&ha->tgt.q_full_list)) {
5448 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5449 return 0;
5450 }
5451
5452 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
5453 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5454
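/*
* Replay each snapshotted entry under the qpair lock: qfull entries
* get a BUSY status CTIO, term_exchg entries get a TERM EXCHANGE.
* On -ENOMEM we stop and requeue the remainder below.
*/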
5455 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
5456 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
5457 if (cmd->q_full)
5458 /* cmd->state is a borrowed field to hold status */
5459 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
5460 else if (cmd->term_exchg)
5461 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
5462
5463 if (rc == -ENOMEM)
5464 break;
5465
5466 if (cmd->q_full)
5467 ql_dbg(ql_dbg_io, vha, 0x3006,
5468 "%s: busy sent for ox_id[%04x]\n", __func__,
5469 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5470 else if (cmd->term_exchg)
5471 ql_dbg(ql_dbg_io, vha, 0x3007,
5472 "%s: Term exchg sent for ox_id[%04x]\n", __func__,
5473 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5474 else
5475 ql_dbg(ql_dbg_io, vha, 0x3008,
5476 "%s: Unexpected cmd in QFull list %p\n", __func__,
5477 cmd);
5478
5479 list_del(&cmd->cmd_list);
5480 list_add_tail(&cmd->cmd_list, &free_list);
5481
5482 /* piggy-back on the qpair lock for protection */
5483 vha->hw->tgt.num_qfull_cmds_alloc--;
5484 }
5485 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
5486
5487 cmd = NULL;
5488
5489 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
5490 list_del(&cmd->cmd_list);
5491 /* This cmd was never sent to TCM. There is no need
5492 * to schedule free or call free_cmd
5493 */
5494 qlt_free_cmd(cmd);
5495 }
5496
5497 if (!list_empty(&q_full_list)) {
5498 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5499 list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
5500 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5501 }
5502
5503 return rc;
5504 }
5505
5506 static void
5507 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
5508 uint16_t status)
5509 {
5510 int rc = 0;
5511 struct scsi_qla_host *vha = qpair->vha;
5512
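/*
* If no request-queue space is available for the BUSY CTIO, park
* the ATIO on the q_full list so qlt_free_qfull_cmds() can replay
* it later.
*/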
5513 rc = __qlt_send_busy(qpair, atio, status);
5514 if (rc == -ENOMEM)
5515 qlt_alloc_qfull_cmd(vha, atio, status, 1);
5516 }
5517
5518 static int
5519 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5520 struct atio_from_isp *atio, uint8_t ha_locked)
5521 {
5522 struct qla_hw_data *ha = vha->hw;
5523 unsigned long flags;
5524
5525 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5526 return 0;
5527
5528 if (!ha_locked)
5529 spin_lock_irqsave(&ha->hardware_lock, flags);
5530 qlt_send_busy(qpair, atio, qla_sam_status);
5531 if (!ha_locked)
5532 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5533
5534 return 1;
5535 }
5536
5537 /* ha->hardware_lock supposed to be held on entry */
5538 /* called via callback from qla2xxx */
5539 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5540 struct atio_from_isp *atio, uint8_t ha_locked)
5541 {
5542 struct qla_hw_data *ha = vha->hw;
5543 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5544 int rc;
5545 unsigned long flags = 0;
5546
5547 if (unlikely(tgt == NULL)) {
5548 ql_dbg(ql_dbg_tgt, vha, 0x3064,
5549 "ATIO pkt, but no tgt (ha %p)", ha);
5550 return;
5551 }
5552 /*
5553 * In tgt_stop mode we should also allow all requests to pass.
5554 * Otherwise, some commands can get stuck.
5555 */
5556
5557 tgt->atio_irq_cmd_count++;
5558
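/*
* ATIO_TYPE7 entries carry FCP commands and task management
* requests; IMMED_NOTIFY entries carry link-level events that are
* answered with a NOTIFY_ACK.
*/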
5559 switch (atio->u.raw.entry_type) {
5560 case ATIO_TYPE7:
5561 if (unlikely(atio->u.isp24.exchange_addr ==
5562 cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
5563 ql_dbg(ql_dbg_io, vha, 0x3065,
5564 "qla_target(%d): ATIO_TYPE7 "
5565 "received with UNKNOWN exchange address, "
5566 "sending QUEUE_FULL\n", vha->vp_idx);
5567 if (!ha_locked)
5568 spin_lock_irqsave(&ha->hardware_lock, flags);
5569 qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
5570 if (!ha_locked)
5571 spin_unlock_irqrestore(&ha->hardware_lock,
5572 flags);
5573 break;
5574 }
5575
5576 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5577 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
5578 atio, ha_locked);
5579 if (rc != 0) {
5580 tgt->atio_irq_cmd_count--;
5581 return;
5582 }
5583 rc = qlt_handle_cmd_for_atio(vha, atio);
5584 } else {
5585 rc = qlt_handle_task_mgmt(vha, atio);
5586 }
5587 if (unlikely(rc != 0)) {
5588 if (!ha_locked)
5589 spin_lock_irqsave(&ha->hardware_lock, flags);
5590 switch (rc) {
5591 case -ENODEV:
5592 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5593 "qla_target: Unable to send command to target\n");
5594 break;
5595 case -EBADF:
5596 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5597 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5598 qlt_send_term_exchange(ha->base_qpair, NULL,
5599 atio, 1, 0);
5600 break;
5601 case -EBUSY:
5602 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5603 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5604 vha->vp_idx);
5605 qlt_send_busy(ha->base_qpair, atio,
5606 tc_sam_status);
5607 break;
5608 default:
5609 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5610 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5611 vha->vp_idx);
5612 qlt_send_busy(ha->base_qpair, atio,
5613 qla_sam_status);
5614 break;
5615 }
5616 if (!ha_locked)
5617 spin_unlock_irqrestore(&ha->hardware_lock,
5618 flags);
5619 }
5620 break;
5621
5622 case IMMED_NOTIFY_TYPE:
5623 {
5624 if (unlikely(atio->u.isp2x.entry_status != 0)) {
5625 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5626 "qla_target(%d): Received ATIO packet %x "
5627 "with error status %x\n", vha->vp_idx,
5628 atio->u.raw.entry_type,
5629 atio->u.isp2x.entry_status);
5630 break;
5631 }
5632 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5633
5634 if (!ha_locked)
5635 spin_lock_irqsave(&ha->hardware_lock, flags);
5636 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5637 if (!ha_locked)
5638 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5639 break;
5640 }
5641
5642 default:
5643 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5644 "qla_target(%d): Received unknown ATIO atio "
5645 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5646 break;
5647 }
5648
5649 tgt->atio_irq_cmd_count--;
5650 }
5651
5652 /*
5653 * qpair lock is assumed to be held
5654 * rc = 0 : send terminate & ABTS response
5655 * rc != 0: do not send terminate & ABTS response
5656 */
5657 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
5658 struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
5659 {
5660 struct qla_hw_data *ha = vha->hw;
5661 int rc = 0;
5662
5663 /*
5664 * Detect unresolved exchange. If the same ABTS is unable
5665 * to terminate an existing command and the same ABTS loops
5666 * between FW & Driver, then force a FW dump. Within 1 jiffy,
5667 * we should see multiple loops.
5668 */
5669 if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
5670 qpair->retry_term_jiff == jiffies) {
5671 /* found existing exchange */
5672 qpair->retry_term_cnt++;
5673 if (qpair->retry_term_cnt >= 5) {
5674 rc = -EIO;
5675 qpair->retry_term_cnt = 0;
5676 ql_log(ql_log_warn, vha, 0xffff,
5677 "Unable to send ABTS Respond. Dumping firmware.\n");
5678 ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
5679 vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
5680
5681 if (qpair == ha->base_qpair)
5682 ha->isp_ops->fw_dump(vha);
5683 else
5684 qla2xxx_dump_fw(vha);
5685
5686 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5687 qla2xxx_wake_dpc(vha);
5688 }
5689 } else if (qpair->retry_term_jiff != jiffies) {
5690 qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
5691 qpair->retry_term_cnt = 0;
5692 qpair->retry_term_jiff = jiffies;
5693 }
5694
5695 return rc;
5696 }
5697
5698
5699 static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
5700 struct rsp_que *rsp, response_t *pkt)
5701 {
5702 struct abts_resp_from_24xx_fw *entry =
5703 (struct abts_resp_from_24xx_fw *)pkt;
5704 u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
5705 struct qla_tgt_mgmt_cmd *mcmd;
5706 struct qla_hw_data *ha = vha->hw;
5707
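/*
* A completion carrying QLA_TGT_SKIP_HANDLE has no mgmt command
* attached (e.g. a driver-initiated ABTS response), so a NULL mcmd
* is only an error for other handles.
*/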
5708 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
5709 if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
5710 ql_dbg(ql_dbg_async, vha, 0xe064,
5711 "qla_target(%d): ABTS Comp without mcmd\n",
5712 vha->vp_idx);
5713 return;
5714 }
5715
5716 if (mcmd)
5717 vha = mcmd->vha;
5718 vha->vha_tgt.qla_tgt->abts_resp_expected--;
5719
5720 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5721 "ABTS_RESP_24XX: compl_status %x\n",
5722 entry->compl_status);
5723
5724 if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
5725 if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
5726 le32_to_cpu(entry->error_subcode2) == 0) {
5727 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
5728 ha->tgt.tgt_ops->free_mcmd(mcmd);
5729 return;
5730 }
5731 qlt_24xx_retry_term_exchange(vha, rsp->qpair,
5732 pkt, mcmd);
5733 } else {
5734 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5735 "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
5736 vha->vp_idx, entry->compl_status,
5737 entry->error_subcode1,
5738 entry->error_subcode2);
5739 ha->tgt.tgt_ops->free_mcmd(mcmd);
5740 }
5741 } else if (mcmd) {
5742 ha->tgt.tgt_ops->free_mcmd(mcmd);
5743 }
5744 }
5745
5746 /* ha->hardware_lock supposed to be held on entry */
5747 /* called via callback from qla2xxx */
5748 static void qlt_response_pkt(struct scsi_qla_host *vha,
5749 struct rsp_que *rsp, response_t *pkt)
5750 {
5751 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5752
5753 if (unlikely(tgt == NULL)) {
5754 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5755 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
5756 vha->vp_idx, pkt->entry_type, vha->hw);
5757 return;
5758 }
5759
5760 /*
5761 * In tgt_stop mode we should also allow all requests to pass.
5762 * Otherwise, some commands can get stuck.
5763 */
5764
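/*
* On 2xxx ISPs target I/O arrives here on the response queue as
* ACCEPT_TGT_IO_TYPE entries; CTIO completions, immediate notifies
* and ABTS traffic for 24xx+ ISPs land here as well.
*/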
5765 switch (pkt->entry_type) {
5766 case CTIO_CRC2:
5767 case CTIO_TYPE7:
5768 {
5769 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
5770
5771 qlt_do_ctio_completion(vha, rsp, entry->handle,
5772 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5773 entry);
5774 break;
5775 }
5776
5777 case ACCEPT_TGT_IO_TYPE:
5778 {
5779 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
5780 int rc;
5781
5782 if (atio->u.isp2x.status !=
5783 cpu_to_le16(ATIO_CDB_VALID)) {
5784 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5785 "qla_target(%d): ATIO with error "
5786 "status %x received\n", vha->vp_idx,
5787 le16_to_cpu(atio->u.isp2x.status));
5788 break;
5789 }
5790
5791 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
5792 if (rc != 0)
5793 return;
5794
5795 rc = qlt_handle_cmd_for_atio(vha, atio);
5796 if (unlikely(rc != 0)) {
5797 switch (rc) {
5798 case -ENODEV:
5799 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5800 "qla_target: Unable to send command to target\n");
5801 break;
5802 case -EBADF:
5803 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5804 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5805 qlt_send_term_exchange(rsp->qpair, NULL,
5806 atio, 1, 0);
5807 break;
5808 case -EBUSY:
5809 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5810 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5811 vha->vp_idx);
5812 qlt_send_busy(rsp->qpair, atio,
5813 tc_sam_status);
5814 break;
5815 default:
5816 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5817 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5818 vha->vp_idx);
5819 qlt_send_busy(rsp->qpair, atio,
5820 qla_sam_status);
5821 break;
5822 }
5823 }
5824 }
5825 break;
5826
5827 case CONTINUE_TGT_IO_TYPE:
5828 {
5829 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5830
5831 qlt_do_ctio_completion(vha, rsp, entry->handle,
5832 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5833 entry);
5834 break;
5835 }
5836
5837 case CTIO_A64_TYPE:
5838 {
5839 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5840
5841 qlt_do_ctio_completion(vha, rsp, entry->handle,
5842 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5843 entry);
5844 break;
5845 }
5846
5847 case IMMED_NOTIFY_TYPE:
5848 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5849 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5850 break;
5851
5852 case NOTIFY_ACK_TYPE:
5853 if (tgt->notify_ack_expected > 0) {
5854 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
5855
5856 ql_dbg(ql_dbg_tgt, vha, 0xe036,
5857 "NOTIFY_ACK seq %08x status %x\n",
5858 le16_to_cpu(entry->u.isp2x.seq_id),
5859 le16_to_cpu(entry->u.isp2x.status));
5860 tgt->notify_ack_expected--;
5861 if (entry->u.isp2x.status !=
5862 cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
5863 ql_dbg(ql_dbg_tgt, vha, 0xe061,
5864 "qla_target(%d): NOTIFY_ACK "
5865 "failed %x\n", vha->vp_idx,
5866 le16_to_cpu(entry->u.isp2x.status));
5867 }
5868 } else {
5869 ql_dbg(ql_dbg_tgt, vha, 0xe062,
5870 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
5871 vha->vp_idx);
5872 }
5873 break;
5874
5875 case ABTS_RECV_24XX:
5876 ql_dbg(ql_dbg_tgt, vha, 0xe037,
5877 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5878 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5879 break;
5880
5881 case ABTS_RESP_24XX:
5882 if (tgt->abts_resp_expected > 0) {
5883 qlt_handle_abts_completion(vha, rsp, pkt);
5884 } else {
5885 ql_dbg(ql_dbg_tgt, vha, 0xe064,
5886 "qla_target(%d): Unexpected ABTS_RESP_24XX "
5887 "received\n", vha->vp_idx);
5888 }
5889 break;
5890
5891 default:
5892 ql_dbg(ql_dbg_tgt, vha, 0xe065,
5893 "qla_target(%d): Received unknown response pkt "
5894 "type %x\n", vha->vp_idx, pkt->entry_type);
5895 break;
5896 }
5897
5898 }
5899
5900 /*
5901 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
5902 */
5903 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
5904 uint16_t *mailbox)
5905 {
5906 struct qla_hw_data *ha = vha->hw;
5907 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5908 int login_code;
5909
5910 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
5911 return;
5912
5913 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
5914 IS_QLA2100(ha))
5915 return;
5916 /*
5917 * In tgt_stop mode we should also allow all requests to pass.
5918 * Otherwise, some commands can get stuck.
5919 */
5920
5921
5922 switch (code) {
5923 case MBA_RESET: /* Reset */
5924 case MBA_SYSTEM_ERR: /* System Error */
5925 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
5926 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
5927 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
5928 "qla_target(%d): System error async event %#x "
5929 "occurred", vha->vp_idx, code);
5930 break;
5931 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
5932 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5933 break;
5934
5935 case MBA_LOOP_UP:
5936 {
5937 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5938 "qla_target(%d): Async LOOP_UP occurred "
5939 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
5940 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
5941 if (tgt->link_reinit_iocb_pending) {
5942 qlt_send_notify_ack(ha->base_qpair,
5943 &tgt->link_reinit_iocb,
5944 0, 0, 0, 0, 0, 0);
5945 tgt->link_reinit_iocb_pending = 0;
5946 }
5947 break;
5948 }
5949
5950 case MBA_LIP_OCCURRED:
5951 case MBA_LOOP_DOWN:
5952 case MBA_LIP_RESET:
5953 case MBA_RSCN_UPDATE:
5954 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5955 "qla_target(%d): Async event %#x occurred "
5956 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5957 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
5958 break;
5959
5960 case MBA_REJECTED_FCP_CMD:
5961 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
5962 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
5963 vha->vp_idx,
5964 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
5965
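/*
* mailbox[3] == 1 means the FCP command was rejected for lack of
* exchange resources; after more than five starvation events the
* RISC is reset to reclaim them.
*/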
5966 if (mailbox[3] == 1) {
5967 /* exchange starvation. */
5968 vha->hw->exch_starvation++;
5969 if (vha->hw->exch_starvation > 5) {
5970 ql_log(ql_log_warn, vha, 0xd03a,
5971 "Exchange starvation-. Resetting RISC\n");
5972
5973 vha->hw->exch_starvation = 0;
5974 if (IS_P3P_TYPE(vha->hw))
5975 set_bit(FCOE_CTX_RESET_NEEDED,
5976 &vha->dpc_flags);
5977 else
5978 set_bit(ISP_ABORT_NEEDED,
5979 &vha->dpc_flags);
5980 qla2xxx_wake_dpc(vha);
5981 }
5982 }
5983 break;
5984
5985 case MBA_PORT_UPDATE:
5986 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
5987 "qla_target(%d): Port update async event %#x "
5988 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
5989 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5990 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
5991
5992 login_code = mailbox[2];
5993 if (login_code == 0x4) {
5994 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
5995 "Async MB 2: Got PLOGI Complete\n");
5996 vha->hw->exch_starvation = 0;
5997 } else if (login_code == 0x7)
5998 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
5999 "Async MB 2: Port Logged Out\n");
6000 break;
6001 default:
6002 break;
6003 }
6004
6005 }
6006
6007 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6008 uint16_t loop_id)
6009 {
6010 fc_port_t *fcport, *tfcp, *del;
6011 int rc;
6012 unsigned long flags;
6013 u8 newfcport = 0;
6014
6015 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6016 if (!fcport) {
6017 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
6018 "qla_target(%d): Allocation of tmp FC port failed",
6019 vha->vp_idx);
6020 return NULL;
6021 }
6022
6023 fcport->loop_id = loop_id;
6024
6025 rc = qla24xx_gpdb_wait(vha, fcport, 0);
6026 if (rc != QLA_SUCCESS) {
6027 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
6028 "qla_target(%d): Failed to retrieve fcport "
6029 "information -- get_port_database() returned %x "
6030 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
6031 kfree(fcport);
6032 return NULL;
6033 }
6034
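/*
* If a session with this WWPN already exists, fold the fresh
* port-database data into it and free the temporary fcport;
* otherwise register the new fcport on vp_fcports.
*/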
6035 del = NULL;
6036 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6037 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
6038
6039 if (tfcp) {
6040 tfcp->d_id = fcport->d_id;
6041 tfcp->port_type = fcport->port_type;
6042 tfcp->supported_classes = fcport->supported_classes;
6043 tfcp->flags |= fcport->flags;
6044 tfcp->scan_state = QLA_FCPORT_FOUND;
6045
6046 del = fcport;
6047 fcport = tfcp;
6048 } else {
6049 if (vha->hw->current_topology == ISP_CFG_F)
6050 fcport->flags |= FCF_FABRIC_DEVICE;
6051
6052 list_add_tail(&fcport->list, &vha->vp_fcports);
6053 if (!IS_SW_RESV_ADDR(fcport->d_id))
6054 vha->fcport_count++;
6055 fcport->login_gen++;
6056 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
6057 fcport->login_succ = 1;
6058 newfcport = 1;
6059 }
6060
6061 fcport->deleted = 0;
6062 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6063
6064 switch (vha->host->active_mode) {
6065 case MODE_INITIATOR:
6066 case MODE_DUAL:
6067 if (newfcport) {
6068 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
6069 qla24xx_sched_upd_fcport(fcport);
6070 } else {
6071 ql_dbg(ql_dbg_disc, vha, 0x20ff,
6072 "%s %d %8phC post gpsc fcp_cnt %d\n",
6073 __func__, __LINE__, fcport->port_name, vha->fcport_count);
6074 qla24xx_post_gpsc_work(vha, fcport);
6075 }
6076 }
6077 break;
6078
6079 case MODE_TARGET:
6080 default:
6081 break;
6082 }
6083 if (del)
6084 qla2x00_free_fcport(del);
6085
6086 return fcport;
6087 }
6088
6089 /* Must be called under tgt_mutex */
6090 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
6091 be_id_t s_id)
6092 {
6093 struct fc_port *sess = NULL;
6094 fc_port_t *fcport = NULL;
6095 int rc, global_resets;
6096 uint16_t loop_id = 0;
6097
6098 if (s_id.domain == 0xFF && s_id.area == 0xFC) {
6099 /*
6100 * This is the Domain Controller, so it should be
6101 * OK to drop SCSI commands from it.
6102 */
6103 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
6104 "Unable to find initiator with S_ID %x:%x:%x",
6105 s_id.domain, s_id.area, s_id.al_pa);
6106 return NULL;
6107 }
6108
6109 mutex_lock(&vha->vha_tgt.tgt_mutex);
6110
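/*
* Snapshot the global reset counter; if an ISP reset happens while
* we look up the loop ID and port database, the retrieved data may
* be stale, so the lookup is retried.
*/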
6111 retry:
6112 global_resets =
6113 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
6114
6115 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
6116 if (rc != 0) {
6117 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6118
6119 ql_log(ql_log_info, vha, 0xf071,
6120 "qla_target(%d): Unable to find "
6121 "initiator with S_ID %x:%x:%x",
6122 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
6123
6124 if (rc == -ENOENT) {
6125 qlt_port_logo_t logo;
6126
6127 logo.id = be_to_port_id(s_id);
6128 logo.cmd_count = 1;
6129 qlt_send_first_logo(vha, &logo);
6130 }
6131
6132 return NULL;
6133 }
6134
6135 fcport = qlt_get_port_database(vha, loop_id);
6136 if (!fcport) {
6137 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6138 return NULL;
6139 }
6140
6141 if (global_resets !=
6142 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
6143 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
6144 "qla_target(%d): global reset during session discovery "
6145 "(counter was %d, new %d), retrying", vha->vp_idx,
6146 global_resets,
6147 atomic_read(&vha->vha_tgt.
6148 qla_tgt->tgt_global_resets_count));
6149 goto retry;
6150 }
6151
6152 sess = qlt_create_sess(vha, fcport, true);
6153
6154 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6155
6156 return sess;
6157 }
6158
6159 static void qlt_abort_work(struct qla_tgt *tgt,
6160 struct qla_tgt_sess_work_param *prm)
6161 {
6162 struct scsi_qla_host *vha = tgt->vha;
6163 struct qla_hw_data *ha = vha->hw;
6164 struct fc_port *sess = NULL;
6165 unsigned long flags = 0, flags2 = 0;
6166 be_id_t s_id;
6167 int rc;
6168
6169 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6170
6171 if (tgt->tgt_stop)
6172 goto out_term2;
6173
6174 s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);
6175
6176 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6177 if (!sess) {
6178 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6179
6180 sess = qlt_make_local_sess(vha, s_id);
6181 /* sess has got an extra creation ref */
6182
6183 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6184 if (!sess)
6185 goto out_term2;
6186 } else {
6187 if (sess->deleted) {
6188 sess = NULL;
6189 goto out_term2;
6190 }
6191
6192 if (!kref_get_unless_zero(&sess->sess_kref)) {
6193 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
6194 "%s: kref_get fail %8phC \n",
6195 __func__, sess->port_name);
6196 sess = NULL;
6197 goto out_term2;
6198 }
6199 }
6200
6201 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
6202 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6203
6204 ha->tgt.tgt_ops->put_sess(sess);
6205
6206 if (rc != 0)
6207 goto out_term;
6208 return;
6209
6210 out_term2:
6211 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6212
6213 out_term:
6214 spin_lock_irqsave(&ha->hardware_lock, flags);
6215 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
6216 FCP_TMF_REJECTED, false);
6217 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6218 }
6219
6220 static void qlt_tmr_work(struct qla_tgt *tgt,
6221 struct qla_tgt_sess_work_param *prm)
6222 {
6223 struct atio_from_isp *a = &prm->tm_iocb2;
6224 struct scsi_qla_host *vha = tgt->vha;
6225 struct qla_hw_data *ha = vha->hw;
6226 struct fc_port *sess;
6227 unsigned long flags;
6228 be_id_t s_id;
6229 int rc;
6230 u64 unpacked_lun;
6231 int fn;
6232 void *iocb;
6233
6234 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6235
6236 if (tgt->tgt_stop)
6237 goto out_term2;
6238
6239 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
6240 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6241 if (!sess) {
6242 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6243
6244 sess = qlt_make_local_sess(vha, s_id);
6245 /* sess has got an extra creation ref */
6246
6247 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6248 if (!sess)
6249 goto out_term2;
6250 } else {
6251 if (sess->deleted) {
6252 goto out_term2;
6253 }
6254
6255 if (!kref_get_unless_zero(&sess->sess_kref)) {
6256 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
6257 "%s: kref_get fail %8phC\n",
6258 __func__, sess->port_name);
6259 goto out_term2;
6260 }
6261 }
6262
6263 iocb = a;
6264 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
6265 unpacked_lun =
6266 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
6267
6268 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
6269 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6270
6271 ha->tgt.tgt_ops->put_sess(sess);
6272
6273 if (rc != 0)
6274 goto out_term;
6275 return;
6276
6277 out_term2:
6278 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6279 out_term:
6280 qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
6281 }
6282
6283 static void qlt_sess_work_fn(struct work_struct *work)
6284 {
6285 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
6286 struct scsi_qla_host *vha = tgt->vha;
6287 unsigned long flags;
6288
6289 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6290
6291 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6292 while (!list_empty(&tgt->sess_works_list)) {
6293 struct qla_tgt_sess_work_param *prm = list_entry(
6294 tgt->sess_works_list.next, typeof(*prm),
6295 sess_works_list_entry);
6296
6297 /*
6298 * This work can be scheduled on several CPUs at a time, so we
6299 * must delete the entry to eliminate double processing.
6300 */
6301 list_del(&prm->sess_works_list_entry);
6302
6303 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6304
6305 switch (prm->type) {
6306 case QLA_TGT_SESS_WORK_ABORT:
6307 qlt_abort_work(tgt, prm);
6308 break;
6309 case QLA_TGT_SESS_WORK_TM:
6310 qlt_tmr_work(tgt, prm);
6311 break;
6312 default:
6313 BUG_ON(1);
6314 break;
6315 }
6316
6317 spin_lock_irqsave(&tgt->sess_work_lock, flags);
6318
6319 kfree(prm);
6320 }
6321 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6322 }
6323
6324 /* Must be called under tgt_host_action_mutex */
6325 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
6326 {
6327 struct qla_tgt *tgt;
6328 int rc, i;
6329 struct qla_qpair_hint *h;
6330
6331 if (!QLA_TGT_MODE_ENABLED())
6332 return 0;
6333
6334 if (!IS_TGT_MODE_CAPABLE(ha)) {
6335 ql_log(ql_log_warn, base_vha, 0xe070,
6336 "This adapter does not support target mode.\n");
6337 return 0;
6338 }
6339
6340 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
6341 "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
6342
6343 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
6344
6345 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6346 if (!tgt) {
6347 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
6348 "Unable to allocate struct qla_tgt\n");
6349 return -ENOMEM;
6350 }
6351
6352 tgt->qphints = kcalloc(ha->max_qpairs + 1,
6353 sizeof(struct qla_qpair_hint),
6354 GFP_KERNEL);
6355 if (!tgt->qphints) {
6356 kfree(tgt);
6357 ql_log(ql_log_warn, base_vha, 0x0197,
6358 "Unable to allocate qpair hints.\n");
6359 return -ENOMEM;
6360 }
6361
6362 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
6363 base_vha->host->hostt->supported_mode |= MODE_TARGET;
6364
6365 rc = btree_init64(&tgt->lun_qpair_map);
6366 if (rc) {
6367 kfree(tgt->qphints);
6368 kfree(tgt);
6369 ql_log(ql_log_info, base_vha, 0x0198,
6370 "Unable to initialize lun_qpair_map btree\n");
6371 return -EIO;
6372 }
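/*
* Hint 0 always points at the base qpair; hints 1..max_qpairs
* mirror queue_pair_map so commands can later be spread across the
* available queue pairs via lun_qpair_map.
*/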
6373 h = &tgt->qphints[0];
6374 h->qpair = ha->base_qpair;
6375 INIT_LIST_HEAD(&h->hint_elem);
6376 h->cpuid = ha->base_qpair->cpuid;
6377 list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);
6378
6379 for (i = 0; i < ha->max_qpairs; i++) {
6380 unsigned long flags;
6381
6382 struct qla_qpair *qpair = ha->queue_pair_map[i];
6383
6384 h = &tgt->qphints[i + 1];
6385 INIT_LIST_HEAD(&h->hint_elem);
6386 if (qpair) {
6387 h->qpair = qpair;
6388 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
6389 list_add_tail(&h->hint_elem, &qpair->hints_list);
6390 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
6391 h->cpuid = qpair->cpuid;
6392 }
6393 }
6394
6395 tgt->ha = ha;
6396 tgt->vha = base_vha;
6397 init_waitqueue_head(&tgt->waitQ);
6398 INIT_LIST_HEAD(&tgt->del_sess_list);
6399 spin_lock_init(&tgt->sess_work_lock);
6400 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6401 INIT_LIST_HEAD(&tgt->sess_works_list);
6402 atomic_set(&tgt->tgt_global_resets_count, 0);
6403
6404 base_vha->vha_tgt.qla_tgt = tgt;
6405
6406 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
6407 "qla_target(%d): using 64 Bit PCI addressing",
6408 base_vha->vp_idx);
6409 /* 3 is reserved */
6410 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6411
6412 mutex_lock(&qla_tgt_mutex);
6413 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6414 mutex_unlock(&qla_tgt_mutex);
6415
6416 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
6417 ha->tgt.tgt_ops->add_target(base_vha);
6418
6419 return 0;
6420 }
6421
6422 /* Must be called under tgt_host_action_mutex */
6423 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6424 {
6425 if (!vha->vha_tgt.qla_tgt)
6426 return 0;
6427
6428 if (vha->fc_vport) {
6429 qlt_release(vha->vha_tgt.qla_tgt);
6430 return 0;
6431 }
6432
6433 /* free left over qfull cmds */
6434 qlt_init_term_exchange(vha);
6435
6436 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6437 vha->host_no, ha);
6438 qlt_release(vha->vha_tgt.qla_tgt);
6439
6440 return 0;
6441 }
6442
6443 void qlt_remove_target_resources(struct qla_hw_data *ha)
6444 {
6445 struct scsi_qla_host *node;
6446 u32 key = 0;
6447
6448 btree_for_each_safe32(&ha->tgt.host_map, key, node)
6449 btree_remove32(&ha->tgt.host_map, key);
6450
6451 btree_destroy32(&ha->tgt.host_map);
6452 }
6453
6454 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6455 unsigned char *b)
6456 {
6457 pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
6458 pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
6459 put_unaligned_be64(wwpn, b);
6460 pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
6461 }
6462
6463 /**
6464 * qlt_lport_register - register lport with external module
6465 *
6466 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
6467 * @phys_wwpn: physical port WWPN
6468 * @npiv_wwpn: NPIV WWPN
6469 * @npiv_wwnn: NPIV WWNN
6470 * @callback: lport initialization callback for tcm_qla2xxx code
6471 */
6472 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
6473 u64 npiv_wwpn, u64 npiv_wwnn,
6474 int (*callback)(struct scsi_qla_host *, void *, u64, u64))
6475 {
6476 struct qla_tgt *tgt;
6477 struct scsi_qla_host *vha;
6478 struct qla_hw_data *ha;
6479 struct Scsi_Host *host;
6480 unsigned long flags;
6481 int rc;
6482 u8 b[WWN_SIZE];
6483
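/*
* Walk the global target list for the HBA whose physical WWPN
* matches the one handed down from configfs, then invoke the lport
* initialization callback with an extra Scsi_Host reference held.
*/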
6484 mutex_lock(&qla_tgt_mutex);
6485 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
6486 vha = tgt->vha;
6487 ha = vha->hw;
6488
6489 host = vha->host;
6490 if (!host)
6491 continue;
6492
6493 if (!(host->hostt->supported_mode & MODE_TARGET))
6494 continue;
6495
6496 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6497 continue;
6498
6499 spin_lock_irqsave(&ha->hardware_lock, flags);
6500 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6501 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
6502 host->host_no);
6503 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6504 continue;
6505 }
6506 if (tgt->tgt_stop) {
6507 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
6508 host->host_no);
6509 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6510 continue;
6511 }
6512 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6513
6514 if (!scsi_host_get(host)) {
6515 ql_dbg(ql_dbg_tgt, vha, 0xe068,
6516 "Unable to scsi_host_get() for"
6517 " qla2xxx scsi_host\n");
6518 continue;
6519 }
6520 qlt_lport_dump(vha, phys_wwpn, b);
6521
6522 if (memcmp(vha->port_name, b, WWN_SIZE)) {
6523 scsi_host_put(host);
6524 continue;
6525 }
6526 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6527 if (rc != 0)
6528 scsi_host_put(host);
6529
6530 mutex_unlock(&qla_tgt_mutex);
6531 return rc;
6532 }
6533 mutex_unlock(&qla_tgt_mutex);
6534
6535 return -ENODEV;
6536 }
6537 EXPORT_SYMBOL(qlt_lport_register);
6538
6539 /**
6540 * qlt_lport_deregister - Deregister lport
6541 *
6542 * @vha: Registered scsi_qla_host pointer
6543 */
6544 void qlt_lport_deregister(struct scsi_qla_host *vha)
6545 {
6546 struct qla_hw_data *ha = vha->hw;
6547 struct Scsi_Host *sh = vha->host;
6548 /*
6549 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
6550 */
6551 vha->vha_tgt.target_lport_ptr = NULL;
6552 ha->tgt.tgt_ops = NULL;
6553 /*
6554 * Release the Scsi_Host reference for the underlying qla2xxx host
6555 */
6556 scsi_host_put(sh);
6557 }
6558 EXPORT_SYMBOL(qlt_lport_deregister);
6559
6560 /* Must be called under HW lock */
6561 void qlt_set_mode(struct scsi_qla_host *vha)
6562 {
6563 switch (vha->qlini_mode) {
6564 case QLA2XXX_INI_MODE_DISABLED:
6565 case QLA2XXX_INI_MODE_EXCLUSIVE:
6566 vha->host->active_mode = MODE_TARGET;
6567 break;
6568 case QLA2XXX_INI_MODE_ENABLED:
6569 vha->host->active_mode = MODE_INITIATOR;
6570 break;
6571 case QLA2XXX_INI_MODE_DUAL:
6572 vha->host->active_mode = MODE_DUAL;
6573 break;
6574 default:
6575 break;
6576 }
6577 }
6578
6579 /* Must be called under HW lock */
6580 static void qlt_clear_mode(struct scsi_qla_host *vha)
6581 {
6582 switch (vha->qlini_mode) {
6583 case QLA2XXX_INI_MODE_DISABLED:
6584 vha->host->active_mode = MODE_UNKNOWN;
6585 break;
6586 case QLA2XXX_INI_MODE_EXCLUSIVE:
6587 vha->host->active_mode = MODE_INITIATOR;
6588 break;
6589 case QLA2XXX_INI_MODE_ENABLED:
6590 case QLA2XXX_INI_MODE_DUAL:
6591 vha->host->active_mode = MODE_INITIATOR;
6592 break;
6593 default:
6594 break;
6595 }
6596 }
6597
6598 /*
6599 * qlt_enable_vha - NO LOCK HELD
6600 *
6601 * host_reset, bring up w/ Target Mode Enabled
6602 */
6603 void
6604 qlt_enable_vha(struct scsi_qla_host *vha)
6605 {
6606 struct qla_hw_data *ha = vha->hw;
6607 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6608 unsigned long flags;
6609 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6610
6611 if (!tgt) {
6612 ql_dbg(ql_dbg_tgt, vha, 0xe069,
6613 "Unable to locate qla_tgt pointer from"
6614 " struct qla_hw_data\n");
6615 dump_stack();
6616 return;
6617 }
6618 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6619 return;
6620
6621 if (ha->tgt.num_act_qpairs > ha->max_qpairs)
6622 ha->tgt.num_act_qpairs = ha->max_qpairs;
6623 spin_lock_irqsave(&ha->hardware_lock, flags);
6624 tgt->tgt_stopped = 0;
6625 qlt_set_mode(vha);
6626 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6627
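/*
* Enabling target mode requires reinitializing the chip: vports
* are bounced via disable/enable, the physical port via a full
* ISP abort.
*/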
6628 mutex_lock(&ha->optrom_mutex);
6629 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
6630 "%s.\n", __func__);
6631 if (vha->vp_idx) {
6632 qla24xx_disable_vp(vha);
6633 qla24xx_enable_vp(vha);
6634 } else {
6635 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6636 qla2xxx_wake_dpc(base_vha);
6637 WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
6638 QLA_SUCCESS);
6639 }
6640 mutex_unlock(&ha->optrom_mutex);
6641 }
6642 EXPORT_SYMBOL(qlt_enable_vha);
6643
6644 /*
6645 * qlt_disable_vha - NO LOCK HELD
6646 *
6647 * Disable Target Mode and reset the adapter
6648 */
6649 static void qlt_disable_vha(struct scsi_qla_host *vha)
6650 {
6651 struct qla_hw_data *ha = vha->hw;
6652 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6653 unsigned long flags;
6654
6655 if (!tgt) {
6656 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6657 "Unable to locate qla_tgt pointer from"
6658 " struct qla_hw_data\n");
6659 dump_stack();
6660 return;
6661 }
6662
6663 spin_lock_irqsave(&ha->hardware_lock, flags);
6664 qlt_clear_mode(vha);
6665 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6666
6667 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6668 qla2xxx_wake_dpc(vha);
6669
6670 /*
6671 * We are expecting the offline state.
6672 * QLA_FUNCTION_FAILED means that adapter is offline.
6673 */
6674 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6675 ql_dbg(ql_dbg_tgt, vha, 0xe081,
6676 "adapter is offline\n");
6677 }
6678
6679 /*
6680 * Called from qla_init.c:qla24xx_vport_create() context to set up
6681 * the target mode specific struct scsi_qla_host and struct qla_hw_data
6682 * members.
6683 */
6684 void
6685 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6686 {
6687 vha->vha_tgt.qla_tgt = NULL;
6688
6689 mutex_init(&vha->vha_tgt.tgt_mutex);
6690 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6691
6692 qlt_clear_mode(vha);
6693
6694 /*
6695 * NOTE: Currently the value is kept the same for <24xx and
6696 * >=24xx ISPs. If it is necessary to change it,
6697 * the check should be added for specific ISPs,
6698 * assigning the value appropriately.
6699 */
6700 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6701
6702 qlt_add_target(ha, vha);
6703 }
6704
6705 u8
6706 qlt_rff_id(struct scsi_qla_host *vha)
6707 {
6708 u8 fc4_feature = 0;
6709 /*
6710 * FC-4 Feature bit 0 indicates target functionality to the name server.
6711 */
6712 if (qla_tgt_mode_enabled(vha)) {
6713 fc4_feature = BIT_0;
6714 } else if (qla_ini_mode_enabled(vha)) {
6715 fc4_feature = BIT_1;
6716 } else if (qla_dual_mode_enabled(vha))
6717 fc4_feature = BIT_0 | BIT_1;
6718
6719 return fc4_feature;
6720 }
6721
6722 /*
6723 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6724 * @vha: SCSI driver HA context
6725 *
6726 * The beginning of the ATIO ring has the initialization control block
6727 * already built by the NVRAM config routine.
6730 */
6731 void
6732 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6733 {
6734 struct qla_hw_data *ha = vha->hw;
6735 uint16_t cnt;
6736 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6737
6738 if (qla_ini_mode_enabled(vha))
6739 return;
6740
6741 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6742 pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
6743 pkt++;
6744 }
6745
6746 }
6747
6748 /*
6749 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6750 * @vha: SCSI driver HA context
6751 */
6752 void
6753 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6754 {
6755 struct qla_hw_data *ha = vha->hw;
6756 struct atio_from_isp *pkt;
6757 int cnt, i;
6758
6759 if (!ha->flags.fw_started)
6760 return;
6761
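/*
* Consume ring entries until we reach one still stamped
* ATIO_PROCESSED, i.e. not yet overwritten by the firmware;
* corrupted FCP frames are terminated instead of being passed up.
*/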
6762 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6763 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6764 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6765 cnt = pkt->u.raw.entry_count;
6766
6767 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6768 /*
6769 * This packet is corrupted. The header + payload
6770 * can not be trusted. There is no point in passing
6771 * it further up.
6772 */
6773 ql_log(ql_log_warn, vha, 0xd03c,
6774 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6775 &pkt->u.isp24.fcp_hdr.s_id,
6776 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6777 pkt->u.isp24.exchange_addr, pkt);
6778
6779 adjust_corrupted_atio(pkt);
6780 qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
6781 ha_locked, 0);
6782 } else {
6783 qlt_24xx_atio_pkt_all_vps(vha,
6784 (struct atio_from_isp *)pkt, ha_locked);
6785 }
6786
6787 for (i = 0; i < cnt; i++) {
6788 ha->tgt.atio_ring_index++;
6789 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6790 ha->tgt.atio_ring_index = 0;
6791 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6792 } else
6793 ha->tgt.atio_ring_ptr++;
6794
6795 pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
6796 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6797 }
6798 wmb();
6799 }
6800
6801 /* Adjust ring index */
6802 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6803 }
6804
6805 void
6806 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6807 {
6808 struct qla_hw_data *ha = vha->hw;
6809 struct qla_msix_entry *msix = &ha->msix_entries[2];
6810 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6811
6812 if (!QLA_TGT_MODE_ENABLED())
6813 return;
6814
6815 wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
6816 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
6817 rd_reg_dword(ISP_ATIO_Q_OUT(vha));
6818
6819 if (ha->flags.msix_enabled) {
6820 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6821 if (IS_QLA2071(ha)) {
6822 /* 4 ports Baker: Enable Interrupt Handshake */
6823 icb->msix_atio = 0;
6824 icb->firmware_options_2 |= cpu_to_le32(BIT_26);
6825 } else {
6826 icb->msix_atio = cpu_to_le16(msix->entry);
6827 icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
6828 }
6829 ql_dbg(ql_dbg_init, vha, 0xf072,
6830 "Registering ICB vector 0x%x for atio que.\n",
6831 msix->entry);
6832 }
6833 } else {
6834 /* INTx|MSI */
6835 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6836 icb->msix_atio = 0;
6837 icb->firmware_options_2 |= cpu_to_le32(BIT_26);
6838 ql_dbg(ql_dbg_init, vha, 0xf072,
6839 "%s: Use INTx for ATIOQ.\n", __func__);
6840 }
6841 }
6842 }
6843
6844 void
6845 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6846 {
6847 struct qla_hw_data *ha = vha->hw;
6848 u32 tmp;
6849
6850 if (!QLA_TGT_MODE_ENABLED())
6851 return;
6852
6853 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6854 if (!ha->tgt.saved_set) {
6855 /* We save only once */
6856 ha->tgt.saved_exchange_count = nv->exchange_count;
6857 ha->tgt.saved_firmware_options_1 =
6858 nv->firmware_options_1;
6859 ha->tgt.saved_firmware_options_2 =
6860 nv->firmware_options_2;
6861 ha->tgt.saved_firmware_options_3 =
6862 nv->firmware_options_3;
6863 ha->tgt.saved_set = 1;
6864 }
6865
6866 if (qla_tgt_mode_enabled(vha))
6867 nv->exchange_count = cpu_to_le16(0xFFFF);
6868 else /* dual */
6869 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6870
6871 /* Enable target mode */
6872 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6873
6874 /* Disable ini mode, if requested */
6875 if (qla_tgt_mode_enabled(vha))
6876 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6877
6878 /* Disable Full Login after LIP */
6879 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6880 /* Enable initial LIP */
6881 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6882 if (ql2xtgt_tape_enable)
6883 /* Enable FC Tape support */
6884 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6885 else
6886 /* Disable FC Tape support */
6887 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6888
6889 /* Disable Full Login after LIP */
6890 nv->host_p &= cpu_to_le32(~BIT_10);
6891
6892 /*
6893 * clear BIT 15 explicitly as we have seen at least
6894 * a couple of instances where this was set and this
6895 * was causing the firmware to not be initialized.
6896 */
6897 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6898 /* Enable target PRLI control */
6899 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6900
6901 if (IS_QLA25XX(ha)) {
6902 /* Change Loop-prefer to Pt-Pt */
6903 tmp = ~(BIT_4|BIT_5|BIT_6);
6904 nv->firmware_options_2 &= cpu_to_le32(tmp);
6905 tmp = P2P << 4;
6906 nv->firmware_options_2 |= cpu_to_le32(tmp);
6907 }
6908 } else {
6909 if (ha->tgt.saved_set) {
6910 nv->exchange_count = ha->tgt.saved_exchange_count;
6911 nv->firmware_options_1 =
6912 ha->tgt.saved_firmware_options_1;
6913 nv->firmware_options_2 =
6914 ha->tgt.saved_firmware_options_2;
6915 nv->firmware_options_3 =
6916 ha->tgt.saved_firmware_options_3;
6917 }
6918 return;
6919 }
6920
6921 if (ha->base_qpair->enable_class_2) {
6922 if (vha->flags.init_done)
6923 fc_host_supported_classes(vha->host) =
6924 FC_COS_CLASS2 | FC_COS_CLASS3;
6925
6926 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
6927 } else {
6928 if (vha->flags.init_done)
6929 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6930
6931 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
6932 }
6933 }
6934
6935 void
6936 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6937 struct init_cb_24xx *icb)
6938 {
6939 struct qla_hw_data *ha = vha->hw;
6940
6941 if (!QLA_TGT_MODE_ENABLED())
6942 return;
6943
6944 if (ha->tgt.node_name_set) {
6945 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6946 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6947 }
6948 }
6949
6950 void
6951 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6952 {
6953 struct qla_hw_data *ha = vha->hw;
6954 u32 tmp;
6955
6956 if (!QLA_TGT_MODE_ENABLED())
6957 return;
6958
6959 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6960 if (!ha->tgt.saved_set) {
6961 /* We save only once */
6962 ha->tgt.saved_exchange_count = nv->exchange_count;
6963 ha->tgt.saved_firmware_options_1 =
6964 nv->firmware_options_1;
6965 ha->tgt.saved_firmware_options_2 =
6966 nv->firmware_options_2;
6967 ha->tgt.saved_firmware_options_3 =
6968 nv->firmware_options_3;
6969 ha->tgt.saved_set = 1;
6970 }
6971
6972 if (qla_tgt_mode_enabled(vha))
6973 nv->exchange_count = cpu_to_le16(0xFFFF);
6974 else /* dual */
6975 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6976
6977 /* Enable target mode */
6978 nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6979
6980 /* Disable ini mode, if requested */
6981 if (qla_tgt_mode_enabled(vha))
6982 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6983 /* Disable Full Login after LIP */
6984 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6985 /* Enable initial LIP */
6986 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6987 /*
6988 * clear BIT 15 explicitly as we have seen at
6989 * least a couple of instances where this was set
6990 * and this was causing the firmware to not be
6991 * initialized.
6992 */
6993 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6994 if (ql2xtgt_tape_enable)
6995 /* Enable FC tape support */
6996 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6997 else
6998 /* Disable FC tape support */
6999 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
7000
7001 /* Disable Full Login after LIP */
7002 nv->host_p &= cpu_to_le32(~BIT_10);
7003 /* Enable target PRLI control */
7004 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
7005
7006 /* Change Loop-prefer to Pt-Pt */
7007 tmp = ~(BIT_4|BIT_5|BIT_6);
7008 nv->firmware_options_2 &= cpu_to_le32(tmp);
7009 tmp = P2P << 4;
7010 nv->firmware_options_2 |= cpu_to_le32(tmp);
7011 } else {
7012 if (ha->tgt.saved_set) {
7013 nv->exchange_count = ha->tgt.saved_exchange_count;
7014 nv->firmware_options_1 =
7015 ha->tgt.saved_firmware_options_1;
7016 nv->firmware_options_2 =
7017 ha->tgt.saved_firmware_options_2;
7018 nv->firmware_options_3 =
7019 ha->tgt.saved_firmware_options_3;
7020 }
7021 return;
7022 }
7023
7024 if (ha->base_qpair->enable_class_2) {
7025 if (vha->flags.init_done)
7026 fc_host_supported_classes(vha->host) =
7027 FC_COS_CLASS2 | FC_COS_CLASS3;
7028
7029 nv->firmware_options_2 |= cpu_to_le32(BIT_8);
7030 } else {
7031 if (vha->flags.init_done)
7032 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
7033
7034 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
7035 }
7036 }
7037
7038 void
7039 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
7040 struct init_cb_81xx *icb)
7041 {
7042 struct qla_hw_data *ha = vha->hw;
7043
7044 if (!QLA_TGT_MODE_ENABLED())
7045 return;
7046
7047 if (ha->tgt.node_name_set) {
7048 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
7049 icb->firmware_options_1 |= cpu_to_le32(BIT_14);
7050 }
7051 }
7052
7053 void
7054 qlt_83xx_iospace_config(struct qla_hw_data *ha)
7055 {
7056 if (!QLA_TGT_MODE_ENABLED())
7057 return;
7058
7059 ha->msix_count += 1; /* For ATIO Q */
7060 }
7061
7062
7063 void
7064 qlt_modify_vp_config(struct scsi_qla_host *vha,
7065 struct vp_config_entry_24xx *vpmod)
7066 {
7067 /* enable target mode. Bit5 = 1 => disable */
7068 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7069 vpmod->options_idx1 &= ~BIT_5;
7070
7071 /* Disable ini mode, if requested. bit4 = 1 => disable */
7072 if (qla_tgt_mode_enabled(vha))
7073 vpmod->options_idx1 &= ~BIT_4;
7074 }
7075
7076 void
7077 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
7078 {
7079 int rc;
7080
7081 if (!QLA_TGT_MODE_ENABLED())
7082 return;
7083
7084 if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
7085 IS_QLA28XX(ha)) {
7086 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
7087 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
7088 } else {
7089 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
7090 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
7091 }
7092
7093 mutex_init(&base_vha->vha_tgt.tgt_mutex);
7094 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
7095
7096 INIT_LIST_HEAD(&base_vha->unknown_atio_list);
7097 INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
7098 qlt_unknown_atio_work_fn);
7099
7100 qlt_clear_mode(base_vha);
7101
7102 rc = btree_init32(&ha->tgt.host_map);
7103 if (rc)
7104 ql_log(ql_log_info, base_vha, 0xd03d,
7105 "Unable to initialize ha->host_map btree\n");
7106
7107 qlt_update_vp_map(base_vha, SET_VP_IDX);
7108 }
7109
7110 irqreturn_t
7111 qla83xx_msix_atio_q(int irq, void *dev_id)
7112 {
7113 struct rsp_que *rsp;
7114 scsi_qla_host_t *vha;
7115 struct qla_hw_data *ha;
7116 unsigned long flags;
7117
7118 rsp = (struct rsp_que *) dev_id;
7119 ha = rsp->hw;
7120 vha = pci_get_drvdata(ha->pdev);
7121
7122 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7123
7124 qlt_24xx_process_atio_queue(vha, 0);
7125
7126 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7127
7128 return IRQ_HANDLED;
7129 }
7130
7131 static void
7132 qlt_handle_abts_recv_work(struct work_struct *work)
7133 {
7134 struct qla_tgt_sess_op *op = container_of(work,
7135 struct qla_tgt_sess_op, work);
7136 scsi_qla_host_t *vha = op->vha;
7137 struct qla_hw_data *ha = vha->hw;
7138 unsigned long flags;
7139
7140 if (qla2x00_reset_active(vha) ||
7141 (op->chip_reset != ha->base_qpair->chip_reset))
7142 return;
7143
7144 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7145 qlt_24xx_process_atio_queue(vha, 0);
7146 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7147
7148 spin_lock_irqsave(&ha->hardware_lock, flags);
7149 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
7150 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7151
7152 kfree(op);
7153 }
7154
7155 void
7156 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
7157 response_t *pkt)
7158 {
7159 struct qla_tgt_sess_op *op;
7160
7161 op = kzalloc(sizeof(*op), GFP_ATOMIC);
7162
7163 if (!op) {
7164 /* Do not touch the ATIO queue here. This is best-effort error
7165 * recovery at this point.
7166 */
7167 qlt_response_pkt_all_vps(vha, rsp, pkt);
7168 return;
7169 }
7170
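/*
* Stash the ABTS and defer it to qla_tgt_wq; the work handler
* drains the ATIO queue first, which keeps the abort ordered
* behind any command still sitting in the ATIO queue.
*/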
7171 memcpy(&op->atio, pkt, sizeof(*pkt));
7172 op->vha = vha;
7173 op->chip_reset = vha->hw->base_qpair->chip_reset;
7174 op->rsp = rsp;
7175 INIT_WORK(&op->work, qlt_handle_abts_recv_work);
7176 queue_work(qla_tgt_wq, &op->work);
7177 return;
7178 }
7179
7180 int
7181 qlt_mem_alloc(struct qla_hw_data *ha)
7182 {
7183 if (!QLA_TGT_MODE_ENABLED())
7184 return 0;
7185
7186 ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
7187 sizeof(struct qla_tgt_vp_map),
7188 GFP_KERNEL);
7189 if (!ha->tgt.tgt_vp_map)
7190 return -ENOMEM;
7191
7192 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
7193 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
7194 &ha->tgt.atio_dma, GFP_KERNEL);
7195 if (!ha->tgt.atio_ring) {
7196 kfree(ha->tgt.tgt_vp_map);
7197 return -ENOMEM;
7198 }
7199 return 0;
7200 }
7201
7202 void
7203 qlt_mem_free(struct qla_hw_data *ha)
7204 {
7205 if (!QLA_TGT_MODE_ENABLED())
7206 return;
7207
7208 if (ha->tgt.atio_ring) {
7209 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
7210 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
7211 ha->tgt.atio_dma);
7212 }
7213 ha->tgt.atio_ring = NULL;
7214 ha->tgt.atio_dma = 0;
7215 kfree(ha->tgt.tgt_vp_map);
7216 ha->tgt.tgt_vp_map = NULL;
7217 }
7218
7219 /* vport_slock to be held by the caller */
7220 void
7221 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
7222 {
7223 void *slot;
7224 u32 key;
7225 int rc;
7226
7227 if (!QLA_TGT_MODE_ENABLED())
7228 return;
7229
7230 key = vha->d_id.b24;
7231
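/*
* host_map keys the 24-bit port ID to its scsi_qla_host so traffic
* for NPIV ports can be routed to the right vha; tgt_vp_map does
* the same keyed by VP index.
*/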
7232 switch (cmd) {
7233 case SET_VP_IDX:
7234 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
7235 break;
7236 case SET_AL_PA:
7237 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7238 if (!slot) {
7239 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
7240 "Save vha in host_map %p %06x\n", vha, key);
7241 rc = btree_insert32(&vha->hw->tgt.host_map,
7242 key, vha, GFP_ATOMIC);
7243 if (rc)
7244 ql_log(ql_log_info, vha, 0xd03e,
7245 "Unable to insert s_id into host_map: %06x\n",
7246 key);
7247 return;
7248 }
7249 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
7250 "replace existing vha in host_map %p %06x\n", vha, key);
7251 btree_update32(&vha->hw->tgt.host_map, key, vha);
7252 break;
7253 case RESET_VP_IDX:
7254 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
7255 break;
7256 case RESET_AL_PA:
7257 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
7258 "clear vha in host_map %p %06x\n", vha, key);
7259 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7260 if (slot)
7261 btree_remove32(&vha->hw->tgt.host_map, key);
7262 vha->d_id.b24 = 0;
7263 break;
7264 }
7265 }
7266
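/*
 * Update the s_id -> vha mapping when a port is assigned its first
 * port id or its port id changes.
 */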
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
	if (!vha->d_id.b24) {
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	} else if (vha->d_id.b24 != id.b24) {
		qlt_update_vp_map(vha, RESET_AL_PA);
		vha->d_id = id;
		qlt_update_vp_map(vha, SET_AL_PA);
	}
}

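/* Translate the qlini_mode module parameter string into ql2x_ini_mode. */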
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}

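/*
 * Module-load-time setup of the target-mode infrastructure: slab caches
 * for management commands and PLOGI acks, the management-command mempool
 * and the qla_tgt_wq workqueue. Returns 1 when initiator mode is being
 * disabled, 0 on plain success, negative errno on failure.
 */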
int __init qlt_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd),
	    __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

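/* Tear down everything set up by qlt_init(), in reverse order. */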
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}
