1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7
8 #include <linux/debugfs.h>
9 #include <linux/seq_file.h>
10
11 static struct dentry *qla2x00_dfs_root;
12 static atomic_t qla2x00_dfs_root_count;
13
14 #define QLA_DFS_RPORT_DEVLOSS_TMO 1
15
16 static int
qla_dfs_rport_get(struct fc_port * fp,int attr_id,u64 * val)17 qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
18 {
19 switch (attr_id) {
20 case QLA_DFS_RPORT_DEVLOSS_TMO:
21 /* Only supported for FC-NVMe devices that are registered. */
22 if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
23 return -EIO;
24 *val = fp->nvme_remote_port->dev_loss_tmo;
25 break;
26 default:
27 return -EINVAL;
28 }
29 return 0;
30 }
31
32 static int
qla_dfs_rport_set(struct fc_port * fp,int attr_id,u64 val)33 qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
34 {
35 switch (attr_id) {
36 case QLA_DFS_RPORT_DEVLOSS_TMO:
37 /* Only supported for FC-NVMe devices that are registered. */
38 if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
39 return -EIO;
40 #if (IS_ENABLED(CONFIG_NVME_FC))
41 return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
42 val);
43 #else /* CONFIG_NVME_FC */
44 return -EINVAL;
45 #endif /* CONFIG_NVME_FC */
46 default:
47 return -EINVAL;
48 }
49 return 0;
50 }
51
/*
 * Generate a read/write debugfs attribute for an rport field.
 *
 * _attr_id : QLA_DFS_RPORT_* id passed to qla_dfs_rport_get()/_set().
 * _attr    : attribute name; expands to qla_dfs_rport_<_attr>_fops.
 *
 * The debugfs file's private data is the struct fc_port pointer; the
 * value is formatted/parsed as "%llu\n".
 */
#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr) \
static int qla_dfs_rport_##_attr##_get(void *data, u64 *val) \
{ \
	struct fc_port *fp = data; \
	return qla_dfs_rport_get(fp, _attr_id, val); \
} \
static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \
{ \
	struct fc_port *fp = data; \
	return qla_dfs_rport_set(fp, _attr_id, val); \
} \
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops, \
		qla_dfs_rport_##_attr##_get, \
		qla_dfs_rport_##_attr##_set, "%llu\n")
66
/*
 * Wrapper for getting fc_port fields.
 *
 * _attr : Attribute name.
 * _get_val : Accessor macro to retrieve the value.
 *
 * Generates a read-only (no set callback) debugfs attribute named
 * qla_dfs_rport_field_<_attr>_fops; private data is the fc_port pointer.
 */
#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) \
static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \
{ \
	struct fc_port *fp = data; \
	*val = _get_val; \
	return 0; \
} \
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops, \
		qla_dfs_rport_field_##_attr##_get, \
		NULL, "%llu\n")

/* Read-only attribute with an arbitrary accessor expression (uses 'fp'). */
#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)

/* Read-only attribute backed by the plain struct member fp-><_attr>. */
#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)
89
/* dev_loss_tmo is the only read/write rport attribute (FC-NVMe only). */
DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);

/* Read-only views of fc_port state, flags and generation counters. */
DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
DEFINE_QLA_DFS_RPORT_FIELD(flags);
DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
/* These two need accessor expressions rather than a plain member read. */
DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));
104
105 void
qla2x00_dfs_create_rport(scsi_qla_host_t * vha,struct fc_port * fp)106 qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
107 {
108 char wwn[32];
109
110 #define QLA_CREATE_RPORT_FIELD_ATTR(_attr) \
111 debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir, \
112 fp, &qla_dfs_rport_field_##_attr##_fops)
113
114 if (!vha->dfs_rport_root || fp->dfs_rport_dir)
115 return;
116
117 sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
118 fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
119 if (IS_ERR(fp->dfs_rport_dir))
120 return;
121 if (NVME_TARGET(vha->hw, fp))
122 debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
123 fp, &qla_dfs_rport_dev_loss_tmo_fops);
124
125 QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
126 QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
127 QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
128 QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
129 QLA_CREATE_RPORT_FIELD_ATTR(flags);
130 QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
131 QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
132 QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
133 QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
134 QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
135 QLA_CREATE_RPORT_FIELD_ATTR(port_id);
136 QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
137 }
138
139 void
qla2x00_dfs_remove_rport(scsi_qla_host_t * vha,struct fc_port * fp)140 qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
141 {
142 if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
143 return;
144 debugfs_remove_recursive(fp->dfs_rport_dir);
145 fp->dfs_rport_dir = NULL;
146 }
147
/*
 * debugfs "tgt_sess" show: list every fc_port on vha->vp_fcports with its
 * port id, port name and loop id. Only emitted when target mode is active
 * (vha->vha_tgt.qla_tgt is set). The walk is done under tgt.sess_lock.
 */
static int
qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct fc_port *sess = NULL;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	seq_printf(s, "%s\n", vha->host_str);
	if (tgt) {
		seq_puts(s, "Port ID   Port Name                Handle\n");

		/* Hold sess_lock so the fcport list can't change under us. */
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		list_for_each_entry(sess, &vha->vp_fcports, list)
			seq_printf(s, "%02x:%02x:%02x  %8phC  %d\n",
				   sess->d_id.b.domain, sess->d_id.b.area,
				   sess->d_id.b.al_pa, sess->port_name,
				   sess->loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	return 0;
}
172
173 DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess);
174
/*
 * debugfs "tgt_port_database" show: fetch the firmware GID list via a
 * mailbox command and print one line per entry (name/id/loop id).
 *
 * Always returns 0 so the debugfs read itself succeeds; failures are
 * reported in the log (alloc) or simply truncate the output (mailbox).
 */
static int
qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	struct gid_list_info *gid_list;
	dma_addr_t gid_list_dma;
	char *id_iter;
	int rc, i;
	uint16_t entries;

	seq_printf(s, "%s\n", vha->host_str);
	/* The GID list is DMA'd in by firmware; needs a coherent buffer. */
	gid_list = dma_alloc_coherent(&ha->pdev->dev,
				      qla2x00_gid_list_size(ha),
				      &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_user, vha, 0x7018,
		       "DMA allocation failed for %u\n",
		       qla2x00_gid_list_size(ha));
		return 0;
	}

	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
				  &entries);
	if (rc != QLA_SUCCESS)
		goto out_free_id_list;

	id_iter = (char *)gid_list;

	seq_puts(s, "Port Name	Port ID		Loop ID\n");

	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;

		rc = qla24xx_print_fc_port_id(vha, s, le16_to_cpu(gid->loop_id));
		if (rc != QLA_SUCCESS)
			break;
		/* Entry stride is chip-dependent, not sizeof(*gid). */
		id_iter += ha->gid_list_info_size;
	}
out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
			  gid_list, gid_list_dma);

	return 0;
}
220
221 DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database);
222
/*
 * debugfs "fw_resource_count" show: query firmware resource counts via a
 * mailbox command and print them, followed by the driver's own IOCB and
 * exchange usage estimates when ql2xenforce_iocb_limit is set.
 */
static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	uint16_t mb[MAX_IOCB_MB_REG];
	int rc;
	struct qla_hw_data *ha = vha->hw;
	u16 iocbs_used, i, exch_used;

	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
	if (rc != QLA_SUCCESS) {
		seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
	} else {
		/* Mailbox register layout per the resource-count command. */
		seq_puts(s, "FW Resource count\n\n");
		seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
		seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
		seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
		seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
		seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
		seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
		seq_printf(s, "MAX VP count[%d]\n", mb[11]);
		seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
		seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
		    mb[20]);
		seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
		    mb[21]);
		seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
		    mb[22]);
		seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
		    mb[23]);
	}

	if (ql2xenforce_iocb_limit) {
		/* lock is not require. It's an estimate. */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		exch_used = ha->base_qpair->fwres.exch_used;
		/* Sum usage across the base qpair and all extra qpairs. */
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i]) {
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
			}
		}

		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
		    iocbs_used, ha->base_qpair->fwres.iocbs_limit);

		seq_printf(s, "estimate exchange used[%d] high water limit [%d]\n",
		    exch_used, ha->base_qpair->fwres.exch_limit);

		/* Mode 2 keeps additional HA-wide atomic counters. */
		if (ql2xenforce_iocb_limit == 2) {
			iocbs_used = atomic_read(&ha->fwres.iocb_used);
			exch_used = atomic_read(&ha->fwres.exch_used);
			seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n",
			    iocbs_used, ha->fwres.iocb_limit);

			seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n",
			    exch_used, ha->fwres.exch_limit);
		}
	}

	return 0;
}
285
286 DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt);
287
/*
 * debugfs "tgt_counters" show: aggregate per-qpair target-mode counters
 * across the base qpair and all extra qpairs, then print them together
 * with DIF statistics, initiator error counters, and per-rport link-down
 * counts. Counters are read without locking (snapshot/estimate).
 */
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_qpair *qpair = vha->hw->base_qpair;
	uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
		core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
		num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
	u16 i;
	fc_port_t *fcport = NULL;

	/* Counters aren't meaningful while the chip is down. */
	if (qla2x00_chip_is_down(vha))
		return 0;

	/* Seed the totals from the base qpair ... */
	qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
	core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
	qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
	core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
	qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
	core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
	num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
	num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
	num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;

	/* ... then add every additional qpair in the map. */
	for (i = 0; i < vha->hw->max_qpairs; i++) {
		qpair = vha->hw->queue_pair_map[i];
		if (!qpair)
			continue;
		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
		core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
		qla_core_ret_sta_ctio +=
		    qpair->tgt_counters.qla_core_ret_sta_ctio;
		core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
		num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
		num_alloc_iocb_failed +=
		    qpair->tgt_counters.num_alloc_iocb_failed;
		num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
	}

	seq_puts(s, "Target Counters\n");
	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
		qla_core_sbt_cmd);
	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
		qla_core_ret_sta_ctio);
	seq_printf(s, "qla_core_ret_ctio = %lld\n",
		qla_core_ret_ctio);
	seq_printf(s, "core_qla_que_buf = %lld\n",
		core_qla_que_buf);
	seq_printf(s, "core_qla_snd_status = %lld\n",
		core_qla_snd_status);
	seq_printf(s, "core_qla_free_cmd = %lld\n",
		core_qla_free_cmd);
	seq_printf(s, "num alloc iocb failed = %lld\n",
		num_alloc_iocb_failed);
	seq_printf(s, "num term exchange sent = %lld\n",
		num_term_xchg_sent);
	seq_printf(s, "num Q full sent = %lld\n",
		num_q_full_sent);

	/* DIF stats */
	seq_printf(s, "DIF Inp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_bytes);
	seq_printf(s, "DIF Outp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_bytes);
	seq_printf(s, "DIF Inp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_requests);
	seq_printf(s, "DIF Outp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_requests);
	seq_printf(s, "DIF Guard err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_guard_err);
	seq_printf(s, "DIF Ref tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
	seq_printf(s, "DIF App tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_app_tag_err);

	seq_puts(s, "\n");
	seq_puts(s, "Initiator Error Counters\n");
	seq_printf(s, "HW Error Count =		%14lld\n",
		   vha->hw_err_cnt);
	seq_printf(s, "Link Down Count =	%14lld\n",
		   vha->short_link_down_cnt);
	seq_printf(s, "Interface Err Count =	%14lld\n",
		   vha->interface_err_cnt);
	seq_printf(s, "Cmd Timeout Count =	%14lld\n",
		   vha->cmd_timeout_cnt);
	seq_printf(s, "Reset Count =		%14lld\n",
		   vha->reset_cmd_err_cnt);
	seq_puts(s, "\n");

	/* Per-rport short link-down counters (only for mapped rports). */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (!fcport->rport)
			continue;

		seq_printf(s, "Target Num = %7d Link Down Count = %14lld\n",
			   fcport->rport->number, fcport->tgt_short_link_down_cnt);
	}
	seq_puts(s, "\n");

	return 0;
}
390
391 DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters);
392
/*
 * debugfs "fce" show: dump the FCE (Fibre Channel Event) trace buffer as
 * hex words, preceded by the in-pointer, DMA base and enable registers.
 * If the user has not enabled FCE tracing, print a usage hint instead.
 * All access to the FCE state is serialized by ha->fce_mutex.
 */
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	uint32_t cnt;
	uint32_t *fce;
	uint64_t fce_start;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->fce_mutex);

	if (ha->flags.user_enabled_fce) {
		seq_puts(s, "FCE Trace Buffer\n");
		seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
		seq_printf(s, "Base = %llx\n\n", (unsigned long long)ha->fce_dma);
		seq_puts(s, "FCE Enable Registers\n");
		seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
			   ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
			   ha->fce_mb[5], ha->fce_mb[6]);

		/* Dump the buffer 8 words per line, prefixed by DMA address. */
		fce = (uint32_t *)ha->fce;
		fce_start = (unsigned long long)ha->fce_dma;
		for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
			if (cnt % 8 == 0)
				seq_printf(s, "\n%llx: ",
					   (unsigned long long)((cnt * 4) + fce_start));
			else
				seq_putc(s, ' ');
			seq_printf(s, "%08x", *fce++);
		}

		seq_puts(s, "\nEnd\n");
	} else {
		seq_puts(s, "FCE Trace is currently not enabled\n");
		seq_puts(s, "\techo [ 1 | 0 ] > fce\n");
	}

	mutex_unlock(&ha->fce_mutex);

	return 0;
}
434
/*
 * debugfs "fce" open: pause FCE tracing (flushing the buffers so the show
 * routine sees consistent data) before handing off to seq_file. Tracing
 * is re-enabled in qla2x00_dfs_fce_release().
 */
static int
qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	/* Nothing to pause if tracing isn't currently running. */
	if (!ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Pause tracing to flush FCE buffers. */
	rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
	if (rval)
		ql_dbg(ql_dbg_user, vha, 0x705c,
		    "DebugFS: Unable to disable FCE (%d).\n", rval);

	ha->flags.fce_enabled = 0;

	mutex_unlock(&ha->fce_mutex);
out:
	return single_open(file, qla2x00_dfs_fce_show, vha);
}
459
/*
 * debugfs "fce" release: restart FCE tracing (paused by the open routine)
 * with a cleared buffer, then release the seq_file. If tracing is already
 * enabled or the buffer is gone, just release.
 */
static int
qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (ha->flags.fce_enabled || !ha->fce)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Re-enable FCE tracing. */
	ha->flags.fce_enabled = 1;
	memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
	rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
	    ha->fce_mb, &ha->fce_bufs);
	if (rval) {
		ql_dbg(ql_dbg_user, vha, 0x700d,
		    "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
		/* Roll the flag back so state matches the hardware. */
		ha->flags.fce_enabled = 0;
	}

	mutex_unlock(&ha->fce_mutex);
out:
	return single_release(inode, file);
}
487
488 static ssize_t
qla2x00_dfs_fce_write(struct file * file,const char __user * buffer,size_t count,loff_t * pos)489 qla2x00_dfs_fce_write(struct file *file, const char __user *buffer,
490 size_t count, loff_t *pos)
491 {
492 struct seq_file *s = file->private_data;
493 struct scsi_qla_host *vha = s->private;
494 struct qla_hw_data *ha = vha->hw;
495 char *buf;
496 int rc = 0;
497 unsigned long enable;
498
499 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
500 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
501 ql_dbg(ql_dbg_user, vha, 0xd034,
502 "this adapter does not support FCE.");
503 return -EINVAL;
504 }
505
506 buf = memdup_user_nul(buffer, count);
507 if (IS_ERR(buf)) {
508 ql_dbg(ql_dbg_user, vha, 0xd037,
509 "fail to copy user buffer.");
510 return PTR_ERR(buf);
511 }
512
513 enable = kstrtoul(buf, 0, 0);
514 rc = count;
515
516 mutex_lock(&ha->fce_mutex);
517
518 if (enable) {
519 if (ha->flags.user_enabled_fce) {
520 mutex_unlock(&ha->fce_mutex);
521 goto out_free;
522 }
523 ha->flags.user_enabled_fce = 1;
524 if (!ha->fce) {
525 rc = qla2x00_alloc_fce_trace(vha);
526 if (rc) {
527 ha->flags.user_enabled_fce = 0;
528 mutex_unlock(&ha->fce_mutex);
529 goto out_free;
530 }
531
532 /* adjust fw dump buffer to take into account of this feature */
533 if (!ha->flags.fce_dump_buf_alloced)
534 qla2x00_alloc_fw_dump(vha);
535 }
536
537 if (!ha->flags.fce_enabled)
538 qla_enable_fce_trace(vha);
539
540 ql_dbg(ql_dbg_user, vha, 0xd045, "User enabled FCE .\n");
541 } else {
542 if (!ha->flags.user_enabled_fce) {
543 mutex_unlock(&ha->fce_mutex);
544 goto out_free;
545 }
546 ha->flags.user_enabled_fce = 0;
547 if (ha->flags.fce_enabled) {
548 qla2x00_disable_fce_trace(vha, NULL, NULL);
549 ha->flags.fce_enabled = 0;
550 }
551
552 qla2x00_free_fce_trace(ha);
553 /* no need to re-adjust fw dump buffer */
554
555 ql_dbg(ql_dbg_user, vha, 0xd04f, "User disabled FCE .\n");
556 }
557
558 mutex_unlock(&ha->fce_mutex);
559 out_free:
560 kfree(buf);
561 return rc;
562 }
563
/*
 * File operations for the "fce" debugfs node. open/release pause and
 * resume tracing around the seq_file dump; write toggles the feature.
 */
static const struct file_operations dfs_fce_ops = {
	.open		= qla2x00_dfs_fce_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= qla2x00_dfs_fce_release,
	.write		= qla2x00_dfs_fce_write,
};
571
572 static int
qla_dfs_naqp_show(struct seq_file * s,void * unused)573 qla_dfs_naqp_show(struct seq_file *s, void *unused)
574 {
575 struct scsi_qla_host *vha = s->private;
576 struct qla_hw_data *ha = vha->hw;
577
578 seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
579 return 0;
580 }
581
582 /*
583 * Helper macros for setting up debugfs entries.
584 * _name: The name of the debugfs entry
585 * _ctx_struct: The context that was passed when creating the debugfs file
586 *
587 * QLA_DFS_SETUP_RD could be used when there is only a show function.
588 * - show function take the name qla_dfs_<sysfs-name>_show
589 *
590 * QLA_DFS_SETUP_RW could be used when there are both show and write functions.
591 * - show function take the name qla_dfs_<sysfs-name>_show
592 * - write function take the name qla_dfs_<sysfs-name>_write
593 *
594 * To have a new debugfs entry, do:
595 * 1. Create a "struct dentry *" in the appropriate structure in the format
596 * dfs_<sysfs-name>
597 * 2. Setup debugfs entries using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW
598 * 3. Create debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE
599 * or QLA_DFS_ROOT_CREATE_FILE
600 * 4. Remove debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE
601 * or QLA_DFS_ROOT_REMOVE_FILE
602 *
603 * Example for creating "TEST" sysfs file:
604 * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; }
605 * 2. QLA_DFS_SETUP_RD(TEST);
606 * 3. In qla2x00_dfs_setup():
607 * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha);
608 * 4. In qla2x00_dfs_remove():
609 * QLA_DFS_REMOVE_FILE(ha, TEST);
610 */
/* Read-only node: generates qla_dfs_<name>_fops from qla_dfs_<name>_show. */
#define QLA_DFS_SETUP_RD(_name) DEFINE_SHOW_ATTRIBUTE(qla_dfs_##_name)

/* Read/write node: also wires in qla_dfs_<name>_write. */
#define QLA_DFS_SETUP_RW(_name) DEFINE_SHOW_STORE_ATTRIBUTE(qla_dfs_##_name)

/*
 * Create a driver-global debugfs file under qla2x00_dfs_root, but only
 * once (skipped if the dentry is already set).
 */
#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx) \
	do { \
		if (!qla_dfs_##_name) \
			qla_dfs_##_name = debugfs_create_file(#_name, \
				_perm, qla2x00_dfs_root, _ctx, \
				&qla_dfs_##_name##_fops); \
	} while (0)

/* Remove a driver-global debugfs file and clear its dentry. */
#define QLA_DFS_ROOT_REMOVE_FILE(_name) \
	do { \
		if (qla_dfs_##_name) { \
			debugfs_remove(qla_dfs_##_name); \
			qla_dfs_##_name = NULL; \
		} \
	} while (0)
630
/*
 * Create a per-structure debugfs file and store the dentry in
 * (_struct)->dfs_<name>.
 *
 * Fix: the expansion was missing the statement-terminating semicolon
 * before "} while (0)", so any use of this macro failed to compile.
 */
#define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx) \
	do { \
		(_struct)->dfs_##_name = debugfs_create_file(#_name, \
			_perm, _parent, _ctx, \
			&qla_dfs_##_name##_fops); \
	} while (0)
637
/* Remove a per-structure debugfs file and clear (_struct)->dfs_<name>. */
#define QLA_DFS_REMOVE_FILE(_struct, _name) \
	do { \
		if ((_struct)->dfs_##_name) { \
			debugfs_remove((_struct)->dfs_##_name); \
			(_struct)->dfs_##_name = NULL; \
		} \
	} while (0)
645
646 static ssize_t
qla_dfs_naqp_write(struct file * file,const char __user * buffer,size_t count,loff_t * pos)647 qla_dfs_naqp_write(struct file *file, const char __user *buffer,
648 size_t count, loff_t *pos)
649 {
650 struct seq_file *s = file->private_data;
651 struct scsi_qla_host *vha = s->private;
652 struct qla_hw_data *ha = vha->hw;
653 char *buf;
654 int rc = 0;
655 unsigned long num_act_qp;
656
657 if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
658 pr_err("host%ld: this adapter does not support Multi Q.",
659 vha->host_no);
660 return -EINVAL;
661 }
662
663 if (!vha->flags.qpairs_available) {
664 pr_err("host%ld: Driver is not setup with Multi Q.",
665 vha->host_no);
666 return -EINVAL;
667 }
668 buf = memdup_user_nul(buffer, count);
669 if (IS_ERR(buf)) {
670 pr_err("host%ld: fail to copy user buffer.",
671 vha->host_no);
672 return PTR_ERR(buf);
673 }
674
675 num_act_qp = simple_strtoul(buf, NULL, 0);
676
677 if (num_act_qp >= vha->hw->max_qpairs) {
678 pr_err("User set invalid number of qpairs %lu. Max = %d",
679 num_act_qp, vha->hw->max_qpairs);
680 rc = -EINVAL;
681 goto out_free;
682 }
683
684 if (num_act_qp != ha->tgt.num_act_qpairs) {
685 ha->tgt.num_act_qpairs = num_act_qp;
686 qlt_clr_qp_table(vha);
687 }
688 rc = count;
689 out_free:
690 kfree(buf);
691 return rc;
692 }
693 QLA_DFS_SETUP_RW(naqp);
694
/*
 * Create the debugfs hierarchy for one host: the driver-global root
 * (shared, reference-counted via qla2x00_dfs_root_count), a per-host
 * directory, and the per-host nodes (fw_resource_count, tgt_counters,
 * tgt_port_database, fce, tgt_sess, naqp on supported chips, rports/).
 *
 * Always returns 0; creation failures are logged and tolerated.
 */
int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* debugfs support is limited to these ISP families. */
	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto out;

	if (qla2x00_dfs_root)
		goto create_dir;

	atomic_set(&qla2x00_dfs_root_count, 0);
	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);

create_dir:
	if (ha->dfs_dir)
		goto create_nodes;

	mutex_init(&ha->fce_mutex);
	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);

	/* Track per-host dirs so the shared root is removed with the last. */
	atomic_inc(&qla2x00_dfs_root_count);

create_nodes:
	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
	    S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops);

	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
	    ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops);

	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
	    S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops);

	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
	    &dfs_fce_ops);

	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
		S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops);

	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
		    0400, ha->dfs_dir, vha, &qla_dfs_naqp_fops);
		if (IS_ERR(ha->tgt.dfs_naqp)) {
			ql_log(ql_log_warn, vha, 0xd011,
			       "Unable to create debugFS naqp node.\n");
			goto out;
		}
	}
	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
	if (IS_ERR(vha->dfs_rport_root)) {
		ql_log(ql_log_warn, vha, 0xd012,
		       "Unable to create debugFS rports node.\n");
		goto out;
	}
out:
	return 0;
}
753
/*
 * Tear down the debugfs hierarchy created by qla2x00_dfs_setup():
 * remove every per-host node, then the per-host directory, and finally
 * the shared driver root once the last host directory is gone (tracked
 * by qla2x00_dfs_root_count). Always returns 0.
 */
int
qla2x00_dfs_remove(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.dfs_naqp) {
		debugfs_remove(ha->tgt.dfs_naqp);
		ha->tgt.dfs_naqp = NULL;
	}

	if (ha->tgt.dfs_tgt_sess) {
		debugfs_remove(ha->tgt.dfs_tgt_sess);
		ha->tgt.dfs_tgt_sess = NULL;
	}

	if (ha->tgt.dfs_tgt_port_database) {
		debugfs_remove(ha->tgt.dfs_tgt_port_database);
		ha->tgt.dfs_tgt_port_database = NULL;
	}

	if (ha->dfs_fw_resource_cnt) {
		debugfs_remove(ha->dfs_fw_resource_cnt);
		ha->dfs_fw_resource_cnt = NULL;
	}

	if (ha->dfs_tgt_counters) {
		debugfs_remove(ha->dfs_tgt_counters);
		ha->dfs_tgt_counters = NULL;
	}

	if (ha->dfs_fce) {
		debugfs_remove(ha->dfs_fce);
		ha->dfs_fce = NULL;
	}

	/* The rports directory has per-rport subdirs; remove recursively. */
	if (vha->dfs_rport_root) {
		debugfs_remove_recursive(vha->dfs_rport_root);
		vha->dfs_rport_root = NULL;
	}

	if (ha->dfs_dir) {
		debugfs_remove(ha->dfs_dir);
		ha->dfs_dir = NULL;
		atomic_dec(&qla2x00_dfs_root_count);
	}

	/* Last host gone: drop the shared driver-level root directory. */
	if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
	    qla2x00_dfs_root) {
		debugfs_remove(qla2x00_dfs_root);
		qla2x00_dfs_root = NULL;
	}

	return 0;
}
808