// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/bsg-lib.h>
#include <uapi/scsi/scsi_bsg_mpi3mr.h>

/**
 * mpi3mr_alloc_trace_buffer - Allocate segmented trace buffer
 * @mrioc: Adapter instance reference
 * @trace_size: Trace buffer size
 *
 * Allocate either segmented memory pools or a contiguous buffer
 * based on the controller capability for the host trace
 * buffer.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
{
	struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
	int i, sz;
	u64 *diag_buffer_list = NULL;
	dma_addr_t diag_buffer_list_dma;
	u32 seg_count;

	if (mrioc->seg_tb_support) {
		seg_count = trace_size / MPI3MR_PAGE_SIZE_4K;
		trace_size = seg_count * MPI3MR_PAGE_SIZE_4K;

		diag_buffer_list = dma_alloc_coherent(&mrioc->pdev->dev,
				sizeof(u64) * seg_count,
				&diag_buffer_list_dma, GFP_KERNEL);
		if (!diag_buffer_list)
			return -1;

		mrioc->num_tb_segs = seg_count;

		sz = sizeof(struct segments) * seg_count;
		mrioc->trace_buf = kzalloc(sz, GFP_KERNEL);
		if (!mrioc->trace_buf)
			goto trace_buf_failed;

		mrioc->trace_buf_pool = dma_pool_create("trace_buf pool",
		    &mrioc->pdev->dev, MPI3MR_PAGE_SIZE_4K, MPI3MR_PAGE_SIZE_4K,
		    0);
		if (!mrioc->trace_buf_pool) {
			ioc_err(mrioc, "trace buf pool: dma_pool_create failed\n");
			goto trace_buf_pool_failed;
		}

		for (i = 0; i < seg_count; i++) {
			mrioc->trace_buf[i].segment =
			    dma_pool_zalloc(mrioc->trace_buf_pool, GFP_KERNEL,
			    &mrioc->trace_buf[i].segment_dma);
			if (!mrioc->trace_buf[i].segment)
				goto tb_seg_alloc_failed;
			diag_buffer_list[i] =
			    (u64) mrioc->trace_buf[i].segment_dma;
		}

		diag_buffer->addr = diag_buffer_list;
		diag_buffer->dma_addr = diag_buffer_list_dma;
		diag_buffer->is_segmented = true;

		dprint_init(mrioc, "segmented trace diag buffer\n"
				"is allocated successfully seg_count:%d\n", seg_count);
		return 0;
	} else {
		diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
		    trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
		if (diag_buffer->addr) {
			dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
			return 0;
		}
		return -1;
	}

tb_seg_alloc_failed:
	if (mrioc->trace_buf_pool) {
		for (i = 0; i < mrioc->num_tb_segs; i++) {
			if (mrioc->trace_buf[i].segment) {
				dma_pool_free(mrioc->trace_buf_pool,
				    mrioc->trace_buf[i].segment,
				    mrioc->trace_buf[i].segment_dma);
				mrioc->trace_buf[i].segment = NULL;
			}
		}
		dma_pool_destroy(mrioc->trace_buf_pool);
		mrioc->trace_buf_pool = NULL;
	}
trace_buf_pool_failed:
	kfree(mrioc->trace_buf);
	mrioc->trace_buf = NULL;
trace_buf_failed:
	if (diag_buffer_list)
		dma_free_coherent(&mrioc->pdev->dev,
		    sizeof(u64) * mrioc->num_tb_segs,
		    diag_buffer_list, diag_buffer_list_dma);
	return -1;
}
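
/*
 * Worked example (illustrative numbers, not read from any controller):
 * with seg_tb_support set and trace_size = 2 MB, seg_count becomes
 * 2097152 / 4096 = 512, so the function builds a 512-entry DMA'ble
 * address list plus 512 4K pool segments and the controller walks the
 * list instead of needing one 2 MB physically contiguous allocation.
 */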

/**
 * mpi3mr_alloc_diag_bufs - Allocate memory for diag buffers
 * @mrioc: Adapter instance reference
 *
 * This function checks whether the driver-defined buffer sizes
 * are greater than the controller's local buffer sizes reported
 * in IOCFacts and, if so, allocates the specific buffer with
 * the limits read from driver page 1.
 *
 * Return: Nothing.
 */
void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	struct diag_buffer_desc *diag_buffer;
	struct mpi3_driver_page1 driver_pg1;
	u32 trace_dec_size, trace_min_size, fw_dec_size, fw_min_size,
		trace_size, fw_size;
	u16 pg_sz = sizeof(driver_pg1);
	int retval = 0;
	bool retry = false;

	if (mrioc->diag_buffers[0].addr || mrioc->diag_buffers[1].addr)
		return;

	retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
	if (retval) {
		ioc_warn(mrioc,
		    "%s: driver page 1 read failed, allocating trace\n"
		    "and firmware diag buffers of default size\n", __func__);
		trace_size = fw_size = MPI3MR_DEFAULT_HDB_MAX_SZ;
		trace_dec_size = fw_dec_size = MPI3MR_DEFAULT_HDB_DEC_SZ;
		trace_min_size = fw_min_size = MPI3MR_DEFAULT_HDB_MIN_SZ;

	} else {
		trace_size = driver_pg1.host_diag_trace_max_size * 1024;
		trace_dec_size = driver_pg1.host_diag_trace_decrement_size
			 * 1024;
		trace_min_size = driver_pg1.host_diag_trace_min_size * 1024;
		fw_size = driver_pg1.host_diag_fw_max_size * 1024;
		fw_dec_size = driver_pg1.host_diag_fw_decrement_size * 1024;
		fw_min_size = driver_pg1.host_diag_fw_min_size * 1024;
		dprint_init(mrioc,
		    "%s:trace diag buffer sizes read from driver\n"
		    "page1: maximum size = %dKB, decrement size = %dKB\n"
		    ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_trace_max_size,
		    driver_pg1.host_diag_trace_decrement_size,
		    driver_pg1.host_diag_trace_min_size);
		dprint_init(mrioc,
		    "%s:firmware diag buffer sizes read from driver\n"
		    "page1: maximum size = %dKB, decrement size = %dKB\n"
		    ", minimum size = %dKB\n", __func__, driver_pg1.host_diag_fw_max_size,
		    driver_pg1.host_diag_fw_decrement_size,
		    driver_pg1.host_diag_fw_min_size);
		if ((trace_size == 0) && (fw_size == 0))
			return;
	}

retry_trace:
	diag_buffer = &mrioc->diag_buffers[0];
	diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_TRACE;
	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
	if ((mrioc->facts.diag_trace_sz < trace_size) && (trace_size >=
		trace_min_size)) {
		if (!retry)
			dprint_init(mrioc,
			    "trying to allocate trace diag buffer of size = %dKB\n",
			    trace_size / 1024);
		if ((!mrioc->seg_tb_support && (get_order(trace_size) > MAX_PAGE_ORDER)) ||
		    mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
			retry = true;
			trace_size -= trace_dec_size;
			dprint_init(mrioc, "trace diag buffer allocation failed\n"
			    "retrying smaller size %dKB\n", trace_size / 1024);
			goto retry_trace;
		} else
			diag_buffer->size = trace_size;
	}

	retry = false;
retry_fw:

	diag_buffer = &mrioc->diag_buffers[1];

	diag_buffer->type = MPI3_DIAG_BUFFER_TYPE_FW;
	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED;
	if ((mrioc->facts.diag_fw_sz < fw_size) && (fw_size >= fw_min_size)) {
		if (get_order(fw_size) <= MAX_PAGE_ORDER) {
			diag_buffer->addr
				= dma_alloc_coherent(&mrioc->pdev->dev, fw_size,
						     &diag_buffer->dma_addr,
						     GFP_KERNEL);
		}
		if (!retry)
			dprint_init(mrioc,
			    "%s:trying to allocate firmware diag buffer of size = %dKB\n",
			    __func__, fw_size / 1024);
		if (diag_buffer->addr) {
			dprint_init(mrioc, "%s:firmware diag buffer allocated successfully\n",
			    __func__);
			diag_buffer->size = fw_size;
		} else {
			retry = true;
			fw_size -= fw_dec_size;
			dprint_init(mrioc, "%s:firmware diag buffer allocation failed,\n"
					"retrying smaller size %dKB\n",
					__func__, fw_size / 1024);
			goto retry_fw;
		}
	}
}
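
/*
 * Illustrative sizing walk-through (example numbers, not controller
 * defaults): if driver page 1 reports max = 8 MB, decrement = 2 MB and
 * min = 2 MB, a failed allocation is retried at 8, 6, 4 and finally
 * 2 MB; once the size would drop below the minimum, the buffer is left
 * unallocated and its status stays MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED.
 */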

/**
 * mpi3mr_issue_diag_buf_post - Send diag buffer post req
 * @mrioc: Adapter instance reference
 * @diag_buffer: Diagnostic buffer descriptor
 *
 * Issue diagnostic buffer post MPI request through admin queue
 * and wait for the completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
	struct diag_buffer_desc *diag_buffer)
{
	struct mpi3_diag_buffer_post_request diag_buf_post_req;
	u8 prev_status;
	int retval = 0;

	if (diag_buffer->disabled_after_reset) {
		dprint_bsg_err(mrioc, "%s: skipping diag buffer posting\n"
				"as it is disabled after reset\n", __func__);
		return -1;
	}

	memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->init_cmds.mutex);
		return -1;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	diag_buf_post_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	diag_buf_post_req.function = MPI3_FUNCTION_DIAG_BUFFER_POST;
	diag_buf_post_req.type = diag_buffer->type;
	diag_buf_post_req.address = cpu_to_le64(diag_buffer->dma_addr);
	diag_buf_post_req.length = cpu_to_le32(diag_buffer->size);

	if (diag_buffer->is_segmented)
		diag_buf_post_req.msg_flags |= MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED;

	dprint_bsg_info(mrioc, "%s: posting diag buffer type %d segmented:%d\n", __func__,
	    diag_buffer->type, diag_buffer->is_segmented);

	prev_status = diag_buffer->status;
	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &diag_buf_post_req,
	    sizeof(diag_buf_post_req), 1);
	if (retval) {
		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
		    __func__);
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->init_cmds.is_waiting = 0;
		dprint_bsg_err(mrioc, "%s: command timed out\n", __func__);
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, diag_buffer->type,
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	dprint_bsg_info(mrioc, "%s: diag buffer type %d posted successfully\n",
	    __func__, diag_buffer->type);

out_unlock:
	if (retval)
		diag_buffer->status = prev_status;
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
	return retval;
}
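
/*
 * Note: like the other internal admin commands in this file, the post
 * is serialized through mrioc->init_cmds (mutex plus the
 * MPI3MR_CMD_PENDING state), so only one internal admin request can be
 * outstanding at a time; a concurrent caller fails fast with -1 instead
 * of queueing behind it.
 */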

/**
 * mpi3mr_post_diag_bufs - Post diag buffers to the controller
 * @mrioc: Adapter instance reference
 *
 * This function calls a helper function to post both trace and
 * firmware buffers to the controller.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_post_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	u8 i;
	struct diag_buffer_desc *diag_buffer;

	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		if (!(diag_buffer->addr))
			continue;
		if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer))
			return -1;
	}
	return 0;
}

/**
 * mpi3mr_issue_diag_buf_release - Send diag buffer release req
 * @mrioc: Adapter instance reference
 * @diag_buffer: Diagnostic buffer descriptor
 *
 * Issue diagnostic buffer manage MPI request with release
 * action request through admin queue and wait for the
 * completion of it or time out.
 *
 * Return: 0 on success, non-zero on failures.
 */
int mpi3mr_issue_diag_buf_release(struct mpi3mr_ioc *mrioc,
	struct diag_buffer_desc *diag_buffer)
{
	struct mpi3_diag_buffer_manage_request diag_buf_manage_req;
	int retval = 0;

	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		return retval;

	memset(&diag_buf_manage_req, 0, sizeof(diag_buf_manage_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		dprint_reset(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->init_cmds.mutex);
		return -1;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	diag_buf_manage_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	diag_buf_manage_req.function = MPI3_FUNCTION_DIAG_BUFFER_MANAGE;
	diag_buf_manage_req.type = diag_buffer->type;
	diag_buf_manage_req.action = MPI3_DIAG_BUFFER_ACTION_RELEASE;

	dprint_reset(mrioc, "%s: releasing diag buffer type %d\n", __func__,
	    diag_buffer->type);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &diag_buf_manage_req,
	    sizeof(diag_buf_manage_req), 1);
	if (retval) {
		dprint_reset(mrioc, "%s: admin request post failed\n", __func__);
		mpi3mr_set_trigger_data_in_hdb(diag_buffer,
		    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->init_cmds.is_waiting = 0;
		dprint_reset(mrioc, "%s: command timed out\n", __func__);
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		dprint_reset(mrioc,
		    "%s: command failed, buffer_type (%d) ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, diag_buffer->type,
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	dprint_reset(mrioc, "%s: diag buffer type %d released successfully\n",
	    __func__, diag_buffer->type);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
	return retval;
}

/**
 * mpi3mr_process_trigger - Generic HDB Trigger handler
 * @mrioc: Adapter instance reference
 * @trigger_type: Trigger type
 * @trigger_data: Trigger data
 * @trigger_flags: Trigger flags
 *
 * This function checks the validity of the HDBs and triggers
 * and, based on the trigger information, creates an event to be
 * processed in the firmware event worker thread.
 *
 * This function should be called with the trigger spinlock held.
 *
 * Return: Nothing
 */
static void mpi3mr_process_trigger(struct mpi3mr_ioc *mrioc, u8 trigger_type,
	union mpi3mr_trigger_data *trigger_data, u8 trigger_flags)
{
	struct trigger_event_data event_data;
	struct diag_buffer_desc *trace_hdb = NULL;
	struct diag_buffer_desc *fw_hdb = NULL;
	u64 global_trigger;

	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
	    MPI3_DIAG_BUFFER_TYPE_TRACE);
	if (trace_hdb &&
	    (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (trace_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		trace_hdb = NULL;

	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);

	if (fw_hdb &&
	    (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (fw_hdb->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		fw_hdb = NULL;

	if (mrioc->snapdump_trigger_active || (mrioc->fw_release_trigger_active
	    && mrioc->trace_release_trigger_active) ||
	    (!trace_hdb && !fw_hdb) || (!mrioc->driver_pg2) ||
	    ((trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
	     && (!mrioc->driver_pg2->num_triggers)))
		return;

	memset(&event_data, 0, sizeof(event_data));
	event_data.trigger_type = trigger_type;
	memcpy(&event_data.trigger_specific_data, trigger_data,
	    sizeof(*trigger_data));
	global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);

	if (global_trigger & MPI3_DRIVER2_GLOBALTRIGGER_SNAPDUMP_ENABLED) {
		event_data.snapdump = true;
		event_data.trace_hdb = trace_hdb;
		event_data.fw_hdb = fw_hdb;
		mrioc->snapdump_trigger_active = true;
	} else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_GLOBAL) {
		if ((trace_hdb) && (global_trigger &
		    MPI3_DRIVER2_GLOBALTRIGGER_DIAG_TRACE_RELEASE) &&
		    (!mrioc->trace_release_trigger_active)) {
			event_data.trace_hdb = trace_hdb;
			mrioc->trace_release_trigger_active = true;
		}
		if ((fw_hdb) && (global_trigger &
		    MPI3_DRIVER2_GLOBALTRIGGER_DIAG_FW_RELEASE) &&
		    (!mrioc->fw_release_trigger_active)) {
			event_data.fw_hdb = fw_hdb;
			mrioc->fw_release_trigger_active = true;
		}
	} else if (trigger_type == MPI3MR_HDB_TRIGGER_TYPE_ELEMENT) {
		if ((trace_hdb) && (trigger_flags &
		    MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE) &&
		    (!mrioc->trace_release_trigger_active)) {
			event_data.trace_hdb = trace_hdb;
			mrioc->trace_release_trigger_active = true;
		}
		if ((fw_hdb) && (trigger_flags &
		    MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE) &&
		    (!mrioc->fw_release_trigger_active)) {
			event_data.fw_hdb = fw_hdb;
			mrioc->fw_release_trigger_active = true;
		}
	}

	if (event_data.trace_hdb || event_data.fw_hdb)
		mpi3mr_hdb_trigger_data_event(mrioc, &event_data);
}
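
/*
 * Sketch of the expected call pattern (the wrappers below follow it):
 * acquire mrioc->trigger_lock, match the condition against the cached
 * driver page 2, and call mpi3mr_process_trigger() while still holding
 * the lock, so the *_release_trigger_active flags are tested and set
 * atomically with respect to other trigger sources.
 */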

/**
 * mpi3mr_global_trigger - Global HDB trigger handler
 * @mrioc: Adapter instance reference
 * @trigger_data: Trigger data
 *
 * This function checks whether the given global trigger is
 * enabled in the driver page 2 and if so calls generic trigger
 * handler to queue event for HDB release.
 *
 * Return: Nothing
 */
void mpi3mr_global_trigger(struct mpi3mr_ioc *mrioc, u64 trigger_data)
{
	unsigned long flags;
	union mpi3mr_trigger_data trigger_specific_data;

	spin_lock_irqsave(&mrioc->trigger_lock, flags);
	if (le64_to_cpu(mrioc->driver_pg2->global_trigger) & trigger_data) {
		memset(&trigger_specific_data, 0,
		    sizeof(trigger_specific_data));
		trigger_specific_data.global = trigger_data;
		mpi3mr_process_trigger(mrioc, MPI3MR_HDB_TRIGGER_TYPE_GLOBAL,
		    &trigger_specific_data, 0);
	}
	spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
}

/**
 * mpi3mr_scsisense_trigger - SCSI sense HDB trigger handler
 * @mrioc: Adapter instance reference
 * @sensekey: Sense Key
 * @asc: Additional Sense Code
 * @ascq: Additional Sense Code Qualifier
 *
 * This function compares SCSI sense trigger values with driver
 * page 2 values and calls generic trigger handler to release
 * HDBs if a match is found.
 *
 * Return: Nothing
 */
void mpi3mr_scsisense_trigger(struct mpi3mr_ioc *mrioc, u8 sensekey, u8 asc,
	u8 ascq)
{
	struct mpi3_driver2_trigger_scsi_sense *scsi_sense_trigger = NULL;
	u64 i = 0;
	unsigned long flags;
	u8 num_triggers, trigger_flags;

	if (mrioc->scsisense_trigger_present) {
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		scsi_sense_trigger = (struct mpi3_driver2_trigger_scsi_sense *)
			mrioc->driver_pg2->trigger;
		num_triggers = mrioc->driver_pg2->num_triggers;
		for (i = 0; i < num_triggers; i++, scsi_sense_trigger++) {
			if (scsi_sense_trigger->type !=
			    MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE)
				continue;
			if (!(scsi_sense_trigger->sense_key ==
			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL
			      || scsi_sense_trigger->sense_key == sensekey))
				continue;
			if (!(scsi_sense_trigger->asc ==
			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL ||
			    scsi_sense_trigger->asc == asc))
				continue;
			if (!(scsi_sense_trigger->ascq ==
			    MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL ||
			    scsi_sense_trigger->ascq == ascq))
				continue;
			trigger_flags = scsi_sense_trigger->flags;
			mpi3mr_process_trigger(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
			    (union mpi3mr_trigger_data *)scsi_sense_trigger,
			    trigger_flags);
			break;
		}
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}

/**
 * mpi3mr_event_trigger - MPI event HDB trigger handler
 * @mrioc: Adapter instance reference
 * @event: MPI Event
 *
 * This function compares event trigger values with driver page
 * 2 values and calls generic trigger handler to release
 * HDBs if a match is found.
 *
 * Return: Nothing
 */
void mpi3mr_event_trigger(struct mpi3mr_ioc *mrioc, u8 event)
{
	struct mpi3_driver2_trigger_event *event_trigger = NULL;
	u64 i = 0;
	unsigned long flags;
	u8 num_triggers, trigger_flags;

	if (mrioc->event_trigger_present) {
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		event_trigger = (struct mpi3_driver2_trigger_event *)
			mrioc->driver_pg2->trigger;
		num_triggers = mrioc->driver_pg2->num_triggers;

		for (i = 0; i < num_triggers; i++, event_trigger++) {
			if (event_trigger->type !=
			    MPI3_DRIVER2_TRIGGER_TYPE_EVENT)
				continue;
			if (event_trigger->event != event)
				continue;
			trigger_flags = event_trigger->flags;
			mpi3mr_process_trigger(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
			    (union mpi3mr_trigger_data *)event_trigger,
			    trigger_flags);
			break;
		}
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}

/**
 * mpi3mr_reply_trigger - MPI Reply HDB trigger handler
 * @mrioc: Adapter instance reference
 * @ioc_status: Masked value of IOC Status from MPI Reply
 * @ioc_loginfo: IOC Log Info from MPI Reply
 *
 * This function compares IOC status and IOC log info trigger
 * values with driver page 2 values and calls generic trigger
 * handler to release HDBs if a match is found.
 *
 * Return: Nothing
 */
void mpi3mr_reply_trigger(struct mpi3mr_ioc *mrioc, u16 ioc_status,
	u32 ioc_loginfo)
{
	struct mpi3_driver2_trigger_reply *reply_trigger = NULL;
	u64 i = 0;
	unsigned long flags;
	u8 num_triggers, trigger_flags;

	if (mrioc->reply_trigger_present) {
		spin_lock_irqsave(&mrioc->trigger_lock, flags);
		reply_trigger = (struct mpi3_driver2_trigger_reply *)
			mrioc->driver_pg2->trigger;
		num_triggers = mrioc->driver_pg2->num_triggers;
		for (i = 0; i < num_triggers; i++, reply_trigger++) {
			if (reply_trigger->type !=
			    MPI3_DRIVER2_TRIGGER_TYPE_REPLY)
				continue;
			if ((le16_to_cpu(reply_trigger->ioc_status) !=
			     ioc_status)
			    && (le16_to_cpu(reply_trigger->ioc_status) !=
			    MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL))
				continue;
			if ((le32_to_cpu(reply_trigger->ioc_log_info) !=
			    (le32_to_cpu(reply_trigger->ioc_log_info_mask) &
			     ioc_loginfo)))
				continue;
			trigger_flags = reply_trigger->flags;
			mpi3mr_process_trigger(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_ELEMENT,
			    (union mpi3mr_trigger_data *)reply_trigger,
			    trigger_flags);
			break;
		}
		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
	}
}
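
/*
 * Example of the match-all semantics (hypothetical trigger values): a
 * sense trigger of sense_key = MATCH_ALL, asc = 0x29, ascq = MATCH_ALL
 * fires on any sense data carrying ASC 0x29, while a reply trigger
 * compares ioc_loginfo only after masking it with ioc_log_info_mask.
 */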

/**
 * mpi3mr_get_num_trigger - Gets number of HDB triggers
 * @mrioc: Adapter instance reference
 * @num_triggers: Number of triggers
 * @page_action: Page action
 *
 * This function reads the number of triggers from driver
 * page 2.
 *
 * Return: 0 on success and proper error codes on failure
 */
static int mpi3mr_get_num_trigger(struct mpi3mr_ioc *mrioc, u8 *num_triggers,
	u8 page_action)
{
	struct mpi3_driver_page2 drvr_page2;
	int retval = 0;

	*num_triggers = 0;

	retval = mpi3mr_cfg_get_driver_pg2(mrioc, &drvr_page2,
	    sizeof(struct mpi3_driver_page2), page_action);

	if (retval) {
		dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__);
		return retval;
	}
	*num_triggers = drvr_page2.num_triggers;
	return retval;
}

/**
 * mpi3mr_refresh_trigger - Handler for Refresh trigger BSG
 * @mrioc: Adapter instance reference
 * @page_action: Page action
 *
 * This function caches the driver page 2 in the driver's memory
 * by reading driver page 2 from the controller for a given page
 * type and updates the HDB trigger values
 *
 * Return: 0 on success and proper error codes on failure
 */
int mpi3mr_refresh_trigger(struct mpi3mr_ioc *mrioc, u8 page_action)
{
	u16 pg_sz = sizeof(struct mpi3_driver_page2);
	struct mpi3_driver_page2 *drvr_page2 = NULL;
	u8 trigger_type, num_triggers;
	int retval;
	int i = 0;
	unsigned long flags;

	retval = mpi3mr_get_num_trigger(mrioc, &num_triggers, page_action);

	if (retval)
		goto out;

	pg_sz = offsetof(struct mpi3_driver_page2, trigger) +
		(num_triggers * sizeof(union mpi3_driver2_trigger_element));
	drvr_page2 = kzalloc(pg_sz, GFP_KERNEL);
	if (!drvr_page2) {
		retval = -ENOMEM;
		goto out;
	}

	retval = mpi3mr_cfg_get_driver_pg2(mrioc, drvr_page2, pg_sz, page_action);
	if (retval) {
		dprint_init(mrioc, "%s: driver page 2 read failed\n", __func__);
		kfree(drvr_page2);
		goto out;
	}
	spin_lock_irqsave(&mrioc->trigger_lock, flags);
	kfree(mrioc->driver_pg2);
	mrioc->driver_pg2 = drvr_page2;
	mrioc->reply_trigger_present = false;
	mrioc->event_trigger_present = false;
	mrioc->scsisense_trigger_present = false;

	for (i = 0; i < mrioc->driver_pg2->num_triggers; i++) {
		trigger_type = mrioc->driver_pg2->trigger[i].event.type;
		switch (trigger_type) {
		case MPI3_DRIVER2_TRIGGER_TYPE_REPLY:
			mrioc->reply_trigger_present = true;
			break;
		case MPI3_DRIVER2_TRIGGER_TYPE_EVENT:
			mrioc->event_trigger_present = true;
			break;
		case MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE:
			mrioc->scsisense_trigger_present = true;
			break;
		default:
			break;
		}
	}
	spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
out:
	return retval;
}
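
/*
 * Illustration of the page size math above (example count): with 4
 * triggers reported, pg_sz = offsetof(struct mpi3_driver_page2, trigger)
 * + 4 * sizeof(union mpi3_driver2_trigger_element), i.e. the page
 * header plus exactly the reported trigger elements, nothing more.
 */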

/**
 * mpi3mr_release_diag_bufs - Release diag buffers
 * @mrioc: Adapter instance reference
 * @skip_rel_action: Skip release action and set buffer state
 *
 * This function calls a helper function to release both trace
 * and firmware buffers from the controller.
 *
 * Return: None
 */
void mpi3mr_release_diag_bufs(struct mpi3mr_ioc *mrioc, u8 skip_rel_action)
{
	u8 i;
	struct diag_buffer_desc *diag_buffer;

	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		if (!(diag_buffer->addr))
			continue;
		if (diag_buffer->status == MPI3MR_HDB_BUFSTATUS_RELEASED)
			continue;
		if (!skip_rel_action)
			mpi3mr_issue_diag_buf_release(mrioc, diag_buffer);
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED;
		atomic64_inc(&event_counter);
	}
}

/**
 * mpi3mr_set_trigger_data_in_hdb - Updates HDB trigger type and
 * trigger data
 *
 * @hdb: HDB pointer
 * @type: Trigger type
 * @trigger_data: Pointer to trigger data information
 * @force: Trigger overwrite flag
 *
 * Updates trigger type and trigger data based on parameter
 * passed to this function
 *
 * Return: Nothing
 */
void mpi3mr_set_trigger_data_in_hdb(struct diag_buffer_desc *hdb,
	u8 type, union mpi3mr_trigger_data *trigger_data, bool force)
{
	if ((!force) && (hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN))
		return;
	hdb->trigger_type = type;
	if (!trigger_data)
		memset(&hdb->trigger_data, 0, sizeof(*trigger_data));
	else
		memcpy(&hdb->trigger_data, trigger_data, sizeof(*trigger_data));
}

/**
 * mpi3mr_set_trigger_data_in_all_hdb - Updates HDB trigger type
 * and trigger data for all HDB
 *
 * @mrioc: Adapter instance reference
 * @type: Trigger type
 * @trigger_data: Pointer to trigger data information
 * @force: Trigger overwrite flag
 *
 * Updates trigger type and trigger data based on parameter
 * passed to this function
 *
 * Return: Nothing
 */
void mpi3mr_set_trigger_data_in_all_hdb(struct mpi3mr_ioc *mrioc,
	u8 type, union mpi3mr_trigger_data *trigger_data, bool force)
{
	struct diag_buffer_desc *hdb = NULL;

	hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_TRACE);
	if (hdb)
		mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force);
	hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);
	if (hdb)
		mpi3mr_set_trigger_data_in_hdb(hdb, type, trigger_data, force);
}

/**
 * mpi3mr_hdbstatuschg_evt_th - HDB status change evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Modifies the status of the applicable diag buffer descriptors
 *
 * Return: Nothing
 */
void mpi3mr_hdbstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_diag_buffer_status_change *evtdata;
	struct diag_buffer_desc *diag_buffer;

	evtdata = (struct mpi3_event_data_diag_buffer_status_change *)
	    event_reply->event_data;

	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, evtdata->type);
	if (!diag_buffer)
		return;
	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED) &&
	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED))
		return;
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED:
	{
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_RELEASED;
		mpi3mr_set_trigger_data_in_hdb(diag_buffer,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		atomic64_inc(&event_counter);
		break;
	}
	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED:
	{
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
		break;
	}
	case MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED:
	{
		diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED;
		break;
	}
	default:
		dprint_event_th(mrioc, "%s: unknown reason_code(%d)\n",
		    __func__, evtdata->reason_code);
		break;
	}
}

/**
 * mpi3mr_diag_buffer_for_type - returns buffer desc for type
 * @mrioc: Adapter instance reference
 * @buf_type: Diagnostic buffer type
 *
 * Identifies matching diag descriptor from mrioc for given diag
 * buffer type.
 *
 * Return: diag buffer descriptor on success, NULL on failures.
 */
struct diag_buffer_desc *
mpi3mr_diag_buffer_for_type(struct mpi3mr_ioc *mrioc, u8 buf_type)
{
	u8 i;

	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		if (mrioc->diag_buffers[i].type == buf_type)
			return &mrioc->diag_buffers[i];
	}
	return NULL;
}

/**
 * mpi3mr_bsg_pel_abort - sends PEL abort request
 * @mrioc: Adapter instance reference
 *
 * This function sends PEL abort request to the firmware through
 * admin request queue.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_pel_req_action_abort pel_abort_req;
	struct mpi3_pel_reply *pel_reply;
	int retval = 0;
	u16 pe_log_status;

	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -1;
	}
	if (mrioc->stop_bsgs || mrioc->block_on_pci_err) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		return -1;
	}

	memset(&pel_abort_req, 0, sizeof(pel_abort_req));
	mutex_lock(&mrioc->pel_abort_cmd.mutex);
	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->pel_abort_cmd.mutex);
		return -1;
	}
	mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
	mrioc->pel_abort_cmd.is_waiting = 1;
	mrioc->pel_abort_cmd.callback = NULL;
	pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
	pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
	pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);

	mrioc->pel_abort_requested = 1;
	init_completion(&mrioc->pel_abort_cmd.done);
	retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
	    sizeof(pel_abort_req), 0);
	if (retval) {
		retval = -1;
		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
		    __func__);
		mrioc->pel_abort_requested = 0;
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->pel_abort_cmd.is_waiting = 0;
		dprint_bsg_err(mrioc, "%s: command timed out\n", __func__);
		if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, (mrioc->pel_abort_cmd.ioc_status &
		    MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->pel_abort_cmd.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
		pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
		pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
		if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
			dprint_bsg_err(mrioc,
			    "%s: command failed, pel_status(0x%04x)\n",
			    __func__, pe_log_status);
			retval = -1;
		}
	}

out_unlock:
	mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->pel_abort_cmd.mutex);
	return retval;
}

/**
 * mpi3mr_bsg_verify_adapter - verify adapter number is valid
 * @ioc_number: Adapter number
 *
 * This function returns the adapter instance pointer for the
 * given adapter number. If the adapter number does not match
 * any adapter in the driver's list, NULL is returned.
 *
 * Return: adapter instance reference
 */
static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
{
	struct mpi3mr_ioc *mrioc = NULL;

	spin_lock(&mrioc_list_lock);
	list_for_each_entry(mrioc, &mrioc_list, list) {
		if (mrioc->id == ioc_number) {
			spin_unlock(&mrioc_list_lock);
			return mrioc;
		}
	}
	spin_unlock(&mrioc_list_lock);
	return NULL;
}

/**
 * mpi3mr_bsg_refresh_hdb_triggers - Refresh HDB trigger data
 * @mrioc: Adapter instance reference
 * @job: BSG Job pointer
 *
 * This function reads the controller trigger config page as
 * defined by the input page type and refreshes the driver's
 * local trigger information structures with the controller's
 * config page data.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long
mpi3mr_bsg_refresh_hdb_triggers(struct mpi3mr_ioc *mrioc,
				struct bsg_job *job)
{
	struct mpi3mr_bsg_out_refresh_hdb_triggers refresh_triggers;
	uint32_t data_out_sz;
	u8 page_action;
	long rval = -EINVAL;

	data_out_sz = job->request_payload.payload_len;

	if (data_out_sz != sizeof(refresh_triggers)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return rval;
	}

	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		return -EFAULT;
	}
	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -EAGAIN;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
	    job->request_payload.sg_cnt,
	    &refresh_triggers, sizeof(refresh_triggers));

	switch (refresh_triggers.page_type) {
	case MPI3MR_HDB_REFRESH_TYPE_CURRENT:
		page_action = MPI3_CONFIG_ACTION_READ_CURRENT;
		break;
	case MPI3MR_HDB_REFRESH_TYPE_DEFAULT:
		page_action = MPI3_CONFIG_ACTION_READ_DEFAULT;
		break;
	case MPI3MR_HDB_HDB_REFRESH_TYPE_PERSISTENT:
		page_action = MPI3_CONFIG_ACTION_READ_PERSISTENT;
		break;
	default:
		dprint_bsg_err(mrioc,
		    "%s: unsupported refresh trigger, page_type %d\n",
		    __func__, refresh_triggers.page_type);
		return rval;
	}
	rval = mpi3mr_refresh_trigger(mrioc, page_action);

	return rval;
}

/**
 * mpi3mr_bsg_upload_hdb - Upload a specific HDB to user space
 * @mrioc: Adapter instance reference
 * @job: BSG Job pointer
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_upload_hdb(struct mpi3mr_ioc *mrioc,
				  struct bsg_job *job)
{
	struct mpi3mr_bsg_out_upload_hdb upload_hdb;
	struct diag_buffer_desc *diag_buffer;
	uint32_t data_out_size;
	uint32_t data_in_size;

	data_out_size = job->request_payload.payload_len;
	data_in_size = job->reply_payload.payload_len;

	if (data_out_size != sizeof(upload_hdb)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  &upload_hdb, sizeof(upload_hdb));

	if ((!upload_hdb.length) || (data_in_size != upload_hdb.length)) {
		dprint_bsg_err(mrioc, "%s: invalid length argument\n",
		    __func__);
		return -EINVAL;
	}
	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, upload_hdb.buf_type);
	if ((!diag_buffer) || (!diag_buffer->addr)) {
		dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n",
		    __func__, upload_hdb.buf_type);
		return -EINVAL;
	}

	if ((diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) &&
	    (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_POSTED_PAUSED)) {
		dprint_bsg_err(mrioc,
		    "%s: invalid buffer status %d for type %d\n",
		    __func__, diag_buffer->status, upload_hdb.buf_type);
		return -EINVAL;
	}

	if ((upload_hdb.start_offset + upload_hdb.length) > diag_buffer->size) {
		dprint_bsg_err(mrioc,
		    "%s: invalid start offset %d, length %d for type %d\n",
		    __func__, upload_hdb.start_offset, upload_hdb.length,
		    upload_hdb.buf_type);
		return -EINVAL;
	}
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
	    (diag_buffer->addr + upload_hdb.start_offset),
	    data_in_size);
	return 0;
}
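
/*
 * Example of the bounds check above (hypothetical request): for a 1 MB
 * trace HDB, start_offset = 0xF0000 with length = 0x20000 is rejected
 * because 0xF0000 + 0x20000 > 0x100000, while length = 0x10000 at the
 * same offset uploads exactly the final 64 KB of the buffer.
 */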

/**
 * mpi3mr_bsg_repost_hdb - Re-post HDB
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function retrieves the HDB descriptor corresponding to a
 * given buffer type and if the HDB is in released status then
 * posts the HDB with the firmware.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_repost_hdb(struct mpi3mr_ioc *mrioc,
				  struct bsg_job *job)
{
	struct mpi3mr_bsg_out_repost_hdb repost_hdb;
	struct diag_buffer_desc *diag_buffer;
	uint32_t data_out_sz;

	data_out_sz = job->request_payload.payload_len;

	if (data_out_sz != sizeof(repost_hdb)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}
	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		return -EFAULT;
	}
	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -EAGAIN;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  &repost_hdb, sizeof(repost_hdb));

	diag_buffer = mpi3mr_diag_buffer_for_type(mrioc, repost_hdb.buf_type);
	if ((!diag_buffer) || (!diag_buffer->addr)) {
		dprint_bsg_err(mrioc, "%s: invalid buffer type %d\n",
		    __func__, repost_hdb.buf_type);
		return -EINVAL;
	}

	if (diag_buffer->status != MPI3MR_HDB_BUFSTATUS_RELEASED) {
		dprint_bsg_err(mrioc,
		    "%s: invalid buffer status %d for type %d\n",
		    __func__, diag_buffer->status, repost_hdb.buf_type);
		return -EINVAL;
	}

	if (mpi3mr_issue_diag_buf_post(mrioc, diag_buffer)) {
		dprint_bsg_err(mrioc, "%s: post failed for type %d\n",
		    __func__, repost_hdb.buf_type);
		return -EFAULT;
	}
	mpi3mr_set_trigger_data_in_hdb(diag_buffer,
	    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);

	return 0;
}

/**
 * mpi3mr_bsg_query_hdb - Handler for query HDB command
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function prepares and copies the host diagnostic buffer
 * entries to the user buffer.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_query_hdb(struct mpi3mr_ioc *mrioc,
				 struct bsg_job *job)
{
	long rval = 0;
	struct mpi3mr_bsg_in_hdb_status *hbd_status;
	struct mpi3mr_hdb_entry *hbd_status_entry;
	u32 length, min_length;
	u8 i;
	struct diag_buffer_desc *diag_buffer;
	uint32_t data_in_sz = 0;

	data_in_sz = job->request_payload.payload_len;

	length = (sizeof(*hbd_status) + ((MPI3MR_MAX_NUM_HDB - 1) *
		    sizeof(*hbd_status_entry)));
	hbd_status = kmalloc(length, GFP_KERNEL);
	if (!hbd_status)
		return -ENOMEM;
	hbd_status_entry = &hbd_status->entry[0];

	hbd_status->num_hdb_types = MPI3MR_MAX_NUM_HDB;
	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		hbd_status_entry->buf_type = diag_buffer->type;
		hbd_status_entry->status = diag_buffer->status;
		hbd_status_entry->trigger_type = diag_buffer->trigger_type;
		memcpy(&hbd_status_entry->trigger_data,
		    &diag_buffer->trigger_data,
		    sizeof(hbd_status_entry->trigger_data));
		hbd_status_entry->size = (diag_buffer->size / 1024);
		hbd_status_entry++;
	}
	hbd_status->element_trigger_format =
		MPI3MR_HDB_QUERY_ELEMENT_TRIGGER_FORMAT_DATA;

	if (data_in_sz < 4) {
		dprint_bsg_err(mrioc, "%s: invalid size passed\n", __func__);
		rval = -EINVAL;
		goto out;
	}
	min_length = min(data_in_sz, length);
	if (job->request_payload.payload_len >= min_length) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    hbd_status, min_length);
		rval = 0;
	}
out:
	kfree(hbd_status);
	return rval;
}
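
/*
 * The buffer copied out above is one mpi3mr_bsg_in_hdb_status header
 * whose trailing entry[] array carries MPI3MR_MAX_NUM_HDB
 * mpi3mr_hdb_entry records, which is why length is computed as
 * sizeof(*hbd_status) plus (MPI3MR_MAX_NUM_HDB - 1) extra entries.
 */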

/**
 * mpi3mr_enable_logdata - Handler for log data enable
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function enables log data caching in the driver if not
 * already enabled and returns the maximum number of log data
 * entries that can be cached in the driver.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_logdata_enable logdata_enable;

	if (!mrioc->logdata_buf) {
		mrioc->logdata_entry_sz =
		    (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
		    + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
		mrioc->logdata_buf_idx = 0;
		mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
		    mrioc->logdata_entry_sz, GFP_KERNEL);

		if (!mrioc->logdata_buf)
			return -ENOMEM;
	}

	memset(&logdata_enable, 0, sizeof(logdata_enable));
	logdata_enable.max_entries =
	    MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
	if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &logdata_enable, sizeof(logdata_enable));
		return 0;
	}

	return -EINVAL;
}
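
/*
 * Entry size illustration (assuming a 128-byte reply buffer purely as
 * an example): logdata_entry_sz = 128 - (sizeof(struct
 * mpi3_event_notification_reply) - 4) + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ,
 * i.e. the event data that fits in one reply plus the per-entry header
 * that mpi3mr_get_logdata() later hands back to user space.
 */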

/**
 * mpi3mr_get_logdata - Handler for get log data
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function copies the log data entries to the user buffer
 * when log caching is enabled in the driver.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;

	if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
		return -EINVAL;

	num_entries = job->request_payload.payload_len / entry_sz;
	if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
		num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
	sz = num_entries * entry_sz;

	if (job->request_payload.payload_len >= sz) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    mrioc->logdata_buf, sz);
		return 0;
	}
	return -EINVAL;
}

/**
 * mpi3mr_bsg_pel_enable - Handler for PEL enable driver
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function is the handler for PEL enable driver.
 * It validates the application-given class and locale and, if
 * required, aborts the existing PEL wait request and/or issues
 * a new PEL wait request to the firmware and returns.
 *
 * Return: 0 on success and proper error codes on failure.
 */
static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
				  struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_bsg_out_pel_enable pel_enable;
	u8 issue_pel_wait;
	u8 tmp_class;
	u16 tmp_locale;

	if (job->request_payload.payload_len != sizeof(pel_enable)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return rval;
	}

	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
			       __func__);
		return -EFAULT;
	}

	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -EAGAIN;
	}

	if (mrioc->stop_bsgs) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		return -EAGAIN;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  &pel_enable, sizeof(pel_enable));

	if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
		dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
			__func__, pel_enable.pel_class);
		rval = 0;
		goto out;
	}
	if (!mrioc->pel_enabled)
		issue_pel_wait = 1;
	else {
		if ((mrioc->pel_class <= pel_enable.pel_class) &&
		    !((mrioc->pel_locale & pel_enable.pel_locale) ^
		      pel_enable.pel_locale)) {
			issue_pel_wait = 0;
			rval = 0;
		} else {
			pel_enable.pel_locale |= mrioc->pel_locale;

			if (mrioc->pel_class < pel_enable.pel_class)
				pel_enable.pel_class = mrioc->pel_class;

			rval = mpi3mr_bsg_pel_abort(mrioc);
			if (rval) {
				dprint_bsg_err(mrioc,
				    "%s: pel_abort failed, status(%ld)\n",
				    __func__, rval);
				goto out;
			}
			issue_pel_wait = 1;
		}
	}
	if (issue_pel_wait) {
		tmp_class = mrioc->pel_class;
		tmp_locale = mrioc->pel_locale;
		mrioc->pel_class = pel_enable.pel_class;
		mrioc->pel_locale = pel_enable.pel_locale;
		mrioc->pel_enabled = 1;
		rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
		if (rval) {
			mrioc->pel_class = tmp_class;
			mrioc->pel_locale = tmp_locale;
			mrioc->pel_enabled = 0;
			dprint_bsg_err(mrioc,
			    "%s: pel get sequence number failed, status(%ld)\n",
			    __func__, rval);
		}
	}

out:
	return rval;
}

/**
 * mpi3mr_get_all_tgt_info - Get all target information
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function copies the device handle, persistent ID, bus ID
 * and target ID of each driver-managed target device to the
 * user provided buffer for the specific controller. It also
 * provides the number of devices managed by the driver for the
 * specific controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	u16 num_devices = 0, i = 0, size;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_device_map_info *devmap_info = NULL;
	struct mpi3mr_all_tgt_info *alltgt_info = NULL;
	uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;

	if (job->request_payload.payload_len < sizeof(u32)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		num_devices++;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	if ((job->request_payload.payload_len <= sizeof(u64)) ||
		list_empty(&mrioc->tgtdev_list)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &num_devices, sizeof(num_devices));
		return 0;
	}

	kern_entrylen = num_devices * sizeof(*devmap_info);
	size = sizeof(u64) + kern_entrylen;
	alltgt_info = kzalloc(size, GFP_KERNEL);
	if (!alltgt_info)
		return -ENOMEM;

	devmap_info = alltgt_info->dmi;
	memset((u8 *)devmap_info, 0xFF, kern_entrylen);
	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (i < num_devices) {
			devmap_info[i].handle = tgtdev->dev_handle;
			devmap_info[i].perst_id = tgtdev->perst_id;
			if (tgtdev->host_exposed && tgtdev->starget) {
				devmap_info[i].target_id = tgtdev->starget->id;
				devmap_info[i].bus_id =
				    tgtdev->starget->channel;
			}
			i++;
		}
	}
	num_devices = i;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	alltgt_info->num_devices = num_devices;

	usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
		sizeof(*devmap_info);
	usr_entrylen *= sizeof(*devmap_info);
	min_entrylen = min(usr_entrylen, kern_entrylen);

	sg_copy_from_buffer(job->request_payload.sg_list,
			    job->request_payload.sg_cnt,
			    alltgt_info, (min_entrylen + sizeof(u64)));
	kfree(alltgt_info);
	return 0;
}
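
/*
 * Example of the copy-size clamping above (hypothetical counts): with
 * 10 devices but a user buffer sized for only 4 entries plus the
 * 8-byte header, usr_entrylen rounds down to 4 whole entries and
 * min_entrylen limits the copy, while num_devices still reports 10 so
 * the application can retry with a larger buffer.
 */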

/**
 * mpi3mr_get_change_count - Get topology change count
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function copies the topology change count provided by the
 * driver in events and cached in the driver to the user
 * provided buffer for the specific controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_change_count chgcnt;

	memset(&chgcnt, 0, sizeof(chgcnt));
	chgcnt.change_count = mrioc->change_count;
	if (job->request_payload.payload_len >= sizeof(chgcnt)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &chgcnt, sizeof(chgcnt));
		return 0;
	}
	return -EINVAL;
}

/**
 * mpi3mr_bsg_adp_reset - Issue controller reset
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function identifies the user provided reset type, issues
 * the appropriate reset to the controller, waits for it to
 * complete, reinitializes the controller and then returns.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
1571 	struct bsg_job *job)
1572 {
1573 	long rval = -EINVAL;
1574 	u8 save_snapdump;
1575 	struct mpi3mr_bsg_adp_reset adpreset;
1576 
1577 	if (job->request_payload.payload_len !=
1578 			sizeof(adpreset)) {
1579 		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
1580 		    __func__);
1581 		goto out;
1582 	}
1583 
1584 	if (mrioc->unrecoverable || mrioc->block_on_pci_err)
1585 		return -EINVAL;
1586 
1587 	sg_copy_to_buffer(job->request_payload.sg_list,
1588 			  job->request_payload.sg_cnt,
1589 			  &adpreset, sizeof(adpreset));
1590 
1591 	switch (adpreset.reset_type) {
1592 	case MPI3MR_BSG_ADPRESET_SOFT:
1593 		save_snapdump = 0;
1594 		break;
1595 	case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
1596 		save_snapdump = 1;
1597 		break;
1598 	default:
1599 		dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
1600 		    __func__, adpreset.reset_type);
1601 		goto out;
1602 	}
1603 
1604 	rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
1605 	    save_snapdump);
1606 
1607 	if (rval)
1608 		dprint_bsg_err(mrioc,
1609 		    "%s: reset handler returned error(%ld) for reset type %d\n",
1610 		    __func__, rval, adpreset.reset_type);
1611 out:
1612 	return rval;
1613 }
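
/*
 * Example (hypothetical userspace sketch): the reset type travels in
 * struct mpi3mr_bsg_adp_reset within the request payload, e.g.
 *
 *	struct mpi3mr_bsg_adp_reset adpreset = {
 *		.reset_type = MPI3MR_BSG_ADPRESET_SOFT,
 *	};
 *
 * A payload length other than sizeof(adpreset) is rejected with
 * -EINVAL before any reset is attempted.
 */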
1614 
1615 /**
1616  * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
1617  * @mrioc: Adapter instance reference
1618  * @job: BSG job reference
1619  *
1620  * This function provides adapter information for the given
1621  * controller
1622  *
1623  * Return: 0 on success and proper error codes on failure
1624  */
1625 static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
1626 	struct bsg_job *job)
1627 {
1628 	enum mpi3mr_iocstate ioc_state;
1629 	struct mpi3mr_bsg_in_adpinfo adpinfo;
1630 
1631 	memset(&adpinfo, 0, sizeof(adpinfo));
1632 	adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
1633 	adpinfo.pci_dev_id = mrioc->pdev->device;
1634 	adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
1635 	adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
1636 	adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
1637 	adpinfo.pci_bus = mrioc->pdev->bus->number;
1638 	adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
1639 	adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
1640 	adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
1641 	adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;
1642 
1643 	ioc_state = mpi3mr_get_iocstate(mrioc);
1644 	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
1645 		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
1646 	else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
1647 		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
1648 	else if (ioc_state == MRIOC_STATE_FAULT)
1649 		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
1650 	else
1651 		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
1652 
1653 	memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
1654 	    sizeof(adpinfo.driver_info));
1655 
1656 	if (job->request_payload.payload_len >= sizeof(adpinfo)) {
1657 		sg_copy_from_buffer(job->request_payload.sg_list,
1658 				    job->request_payload.sg_cnt,
1659 				    &adpinfo, sizeof(adpinfo));
1660 		return 0;
1661 	}
1662 	return -EINVAL;
1663 }
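
/*
 * Note on the PCI fields above: devfn packs a 5 bit slot number and a
 * 3 bit function number, so a devfn of 0x08 decodes as
 * PCI_SLOT(0x08) == 1 and PCI_FUNC(0x08) == 0.
 */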
1664 
1665 /**
1666  * mpi3mr_bsg_process_drv_cmds - Driver Command handler
1667  * @job: BSG job reference
1668  *
1669  * This function is the top level handler for driver commands;
1670  * it does basic validation of the buffer, identifies the
1671  * opcode and dispatches to the correct sub handler.
1672  *
1673  * Return: 0 on success and proper error codes on failure
1674  */
1675 static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
1676 {
1677 	long rval = -EINVAL;
1678 	struct mpi3mr_ioc *mrioc = NULL;
1679 	struct mpi3mr_bsg_packet *bsg_req = NULL;
1680 	struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;
1681 
1682 	bsg_req = job->request;
1683 	drvrcmd = &bsg_req->cmd.drvrcmd;
1684 
1685 	mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
1686 	if (!mrioc)
1687 		return -ENODEV;
1688 
1689 	if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
1690 		rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
1691 		return rval;
1692 	}
1693 
1694 	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
1695 		return -ERESTARTSYS;
1696 
1697 	switch (drvrcmd->opcode) {
1698 	case MPI3MR_DRVBSG_OPCODE_ADPRESET:
1699 		rval = mpi3mr_bsg_adp_reset(mrioc, job);
1700 		break;
1701 	case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
1702 		rval = mpi3mr_get_all_tgt_info(mrioc, job);
1703 		break;
1704 	case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
1705 		rval = mpi3mr_get_change_count(mrioc, job);
1706 		break;
1707 	case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
1708 		rval = mpi3mr_enable_logdata(mrioc, job);
1709 		break;
1710 	case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
1711 		rval = mpi3mr_get_logdata(mrioc, job);
1712 		break;
1713 	case MPI3MR_DRVBSG_OPCODE_PELENABLE:
1714 		rval = mpi3mr_bsg_pel_enable(mrioc, job);
1715 		break;
1716 	case MPI3MR_DRVBSG_OPCODE_QUERY_HDB:
1717 		rval = mpi3mr_bsg_query_hdb(mrioc, job);
1718 		break;
1719 	case MPI3MR_DRVBSG_OPCODE_REPOST_HDB:
1720 		rval = mpi3mr_bsg_repost_hdb(mrioc, job);
1721 		break;
1722 	case MPI3MR_DRVBSG_OPCODE_UPLOAD_HDB:
1723 		rval = mpi3mr_bsg_upload_hdb(mrioc, job);
1724 		break;
1725 	case MPI3MR_DRVBSG_OPCODE_REFRESH_HDB_TRIGGERS:
1726 		rval = mpi3mr_bsg_refresh_hdb_triggers(mrioc, job);
1727 		break;
1728 	case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
1729 	default:
1730 		pr_err("%s: unsupported driver command opcode %d\n",
1731 		    MPI3MR_DRIVER_NAME, drvrcmd->opcode);
1732 		break;
1733 	}
1734 	mutex_unlock(&mrioc->bsg_cmds.mutex);
1735 	return rval;
1736 }
1737 
1738 /**
1739  * mpi3mr_total_num_ioctl_sges - Count number of SGEs required
1740  * @drv_bufs: DMA address of the buffers to be placed in sgl
1741  * @bufcnt: Number of DMA buffers
1742  *
1743  * This function returns the total number of data SGEs required,
1744  * including zero length SGEs and excluding the management request
1745  * and response buffers, for the given list of data buffer
1746  * descriptors.
1747  *
1748  * Return: Number of SGE elements needed
1749  */
1750 static inline u16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_buf_map *drv_bufs,
1751 					      u8 bufcnt)
1752 {
1753 	u16 i, sge_count = 0;
1754 
1755 	for (i = 0; i < bufcnt; i++, drv_bufs++) {
1756 		if (drv_bufs->data_dir == DMA_NONE ||
1757 		    drv_bufs->kern_buf)
1758 			continue;
1759 		sge_count += drv_bufs->num_dma_desc;
1760 		if (!drv_bufs->num_dma_desc)
1761 			sge_count++;
1762 	}
1763 	return sge_count;
1764 }
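
/*
 * Worked example (hypothetical buffer list): three data buffers with
 * 2, 0 and 3 DMA descriptors respectively yield 2 + 1 + 3 = 6 SGEs,
 * since a buffer with no descriptors still consumes one zero length
 * SGE in the request.
 */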
1765 
1766 /**
1767  * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
1768  * @mrioc: Adapter instance reference
1769  * @mpi_req: MPI request
1770  * @sgl_offset: offset to start sgl in the MPI request
1771  * @drv_bufs: DMA address of the buffers to be placed in sgl
1772  * @bufcnt: Number of DMA buffers
1773  * @is_rmc: Does the buffer list have a management command buffer
1774  * @is_rmr: Does the buffer list have a management response buffer
1775  * @num_datasges: Number of data buffers in the list
1776  *
1777  * This function places the DMA address of the given buffers in
1778  * proper format as SGEs in the given MPI request.
1779  *
1780  * Return: 0 on success, -1 on failure
1781  */
1782 static int mpi3mr_bsg_build_sgl(struct mpi3mr_ioc *mrioc, u8 *mpi_req,
1783 				u32 sgl_offset, struct mpi3mr_buf_map *drv_bufs,
1784 				u8 bufcnt, u8 is_rmc, u8 is_rmr, u8 num_datasges)
1785 {
1786 	struct mpi3_request_header *mpi_header =
1787 		(struct mpi3_request_header *)mpi_req;
1788 	u8 *sgl = (mpi_req + sgl_offset), count = 0;
1789 	struct mpi3_mgmt_passthrough_request *rmgmt_req =
1790 	    (struct mpi3_mgmt_passthrough_request *)mpi_req;
1791 	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
1792 	u8 flag, sgl_flags, sgl_flag_eob, sgl_flags_last, last_chain_sgl_flag;
1793 	u16 available_sges, i, sges_needed;
1794 	u32 sge_element_size = sizeof(struct mpi3_sge_common);
1795 	bool chain_used = false;
1796 
1797 	sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
1798 		MPI3_SGE_FLAGS_DLAS_SYSTEM;
1799 	sgl_flag_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
1800 	sgl_flags_last = sgl_flag_eob | MPI3_SGE_FLAGS_END_OF_LIST;
1801 	last_chain_sgl_flag = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
1802 	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
1803 
1804 	sges_needed = mpi3mr_total_num_ioctl_sges(drv_bufs, bufcnt);
1805 
1806 	if (is_rmc) {
1807 		mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
1808 		    sgl_flags_last, drv_buf_iter->kern_buf_len,
1809 		    drv_buf_iter->kern_buf_dma);
1810 		sgl = (u8 *)drv_buf_iter->kern_buf +
1811 			drv_buf_iter->bsg_buf_len;
1812 		available_sges = (drv_buf_iter->kern_buf_len -
1813 		    drv_buf_iter->bsg_buf_len) / sge_element_size;
1814 
1815 		if (sges_needed > available_sges)
1816 			return -1;
1817 
1818 		chain_used = true;
1819 		drv_buf_iter++;
1820 		count++;
1821 		if (is_rmr) {
1822 			mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
1823 			    sgl_flags_last, drv_buf_iter->kern_buf_len,
1824 			    drv_buf_iter->kern_buf_dma);
1825 			drv_buf_iter++;
1826 			count++;
1827 		} else
1828 			mpi3mr_build_zero_len_sge(
1829 			    &rmgmt_req->response_sgl);
1830 		if (num_datasges) {
1831 			i = 0;
1832 			goto build_sges;
1833 		}
1834 	} else {
1835 		if (sgl_offset >= MPI3MR_ADMIN_REQ_FRAME_SZ)
1836 			return -1;
1837 		available_sges = (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) /
1838 			sge_element_size;
1839 		if (!available_sges)
1840 			return -1;
1841 	}
1842 	if (!num_datasges) {
1843 		mpi3mr_build_zero_len_sge(sgl);
1844 		return 0;
1845 	}
1846 	if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
1847 		if ((sges_needed > 2) || (sges_needed > available_sges))
1848 			return -1;
1849 		for (; count < bufcnt; count++, drv_buf_iter++) {
1850 			if (drv_buf_iter->data_dir == DMA_NONE ||
1851 			    !drv_buf_iter->num_dma_desc)
1852 				continue;
1853 			mpi3mr_add_sg_single(sgl, sgl_flags_last,
1854 					     drv_buf_iter->dma_desc[0].size,
1855 					     drv_buf_iter->dma_desc[0].dma_addr);
1856 			sgl += sge_element_size;
1857 		}
1858 		return 0;
1859 	}
1860 	i = 0;
1861 
1862 build_sges:
1863 	for (; count < bufcnt; count++, drv_buf_iter++) {
1864 		if (drv_buf_iter->data_dir == DMA_NONE)
1865 			continue;
1866 		if (!drv_buf_iter->num_dma_desc) {
1867 			if (chain_used && !available_sges)
1868 				return -1;
1869 			if (!chain_used && (available_sges == 1) &&
1870 			    (sges_needed > 1))
1871 				goto setup_chain;
1872 			flag = sgl_flag_eob;
1873 			if (num_datasges == 1)
1874 				flag = sgl_flags_last;
1875 			mpi3mr_add_sg_single(sgl, flag, 0, 0);
1876 			sgl += sge_element_size;
1877 			sges_needed--;
1878 			available_sges--;
1879 			num_datasges--;
1880 			continue;
1881 		}
1882 		for (; i < drv_buf_iter->num_dma_desc; i++) {
1883 			if (chain_used && !available_sges)
1884 				return -1;
1885 			if (!chain_used && (available_sges == 1) &&
1886 			    (sges_needed > 1))
1887 				goto setup_chain;
1888 			flag = sgl_flags;
1889 			if (i == (drv_buf_iter->num_dma_desc - 1)) {
1890 				if (num_datasges == 1)
1891 					flag = sgl_flags_last;
1892 				else
1893 					flag = sgl_flag_eob;
1894 			}
1895 
1896 			mpi3mr_add_sg_single(sgl, flag,
1897 					     drv_buf_iter->dma_desc[i].size,
1898 					     drv_buf_iter->dma_desc[i].dma_addr);
1899 			sgl += sge_element_size;
1900 			available_sges--;
1901 			sges_needed--;
1902 		}
1903 		num_datasges--;
1904 		i = 0;
1905 	}
1906 	return 0;
1907 
1908 setup_chain:
1909 	available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
1910 	if (sges_needed > available_sges)
1911 		return -1;
1912 	mpi3mr_add_sg_single(sgl, last_chain_sgl_flag,
1913 			     (sges_needed * sge_element_size),
1914 			     mrioc->ioctl_chain_sge.dma_addr);
1915 	memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
1916 	sgl = (u8 *)mrioc->ioctl_chain_sge.addr;
1917 	chain_used = true;
1918 	goto build_sges;
1919 }
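
/*
 * Chain example (illustrative, assuming a 128 byte admin request frame
 * and 16 byte SGEs): with sgl_offset == 64 the frame has room for
 * (128 - 64) / 16 == 4 simple SGEs. If six data SGEs are needed, three
 * are built in the frame, the fourth slot becomes a LAST_CHAIN element
 * pointing at ioctl_chain_sge, and the remaining three SGEs are laid
 * out in that chain buffer.
 */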
1920 
1921 /**
1922  * mpi3mr_get_nvme_data_fmt - returns the NVMe data format
1923  * @nvme_encap_request: NVMe encapsulated MPI request
1924  *
1925  * This function returns the type of the data format specified
1926  * in the user provided NVMe command in the NVMe encapsulated request.
1927  *
1928  * Return: Data format of the NVMe command (PRP/SGL etc)
1929  */
1930 static unsigned int mpi3mr_get_nvme_data_fmt(
1931 	struct mpi3_nvme_encapsulated_request *nvme_encap_request)
1932 {
1933 	u8 format = 0;
1934 
1935 	format = ((nvme_encap_request->command[0] & 0xc000) >> 14);
1936 	return format;
1937 
1939 
1940 /**
1941  * mpi3mr_build_nvme_sgl - SGL constructor for NVME
1942  *				   encapsulated request
1943  * @mrioc: Adapter instance reference
1944  * @nvme_encap_request: NVMe encapsulated MPI request
1945  * @drv_bufs: DMA address of the buffers to be placed in sgl
1946  * @bufcnt: Number of DMA buffers
1947  *
1948  * This function places the DMA address of the given buffers in
1949  * proper format as SGEs in the given NVMe encapsulated request.
1950  *
1951  * Return: 0 on success, -1 on failure
1952  */
1953 static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
1954 	struct mpi3_nvme_encapsulated_request *nvme_encap_request,
1955 	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
1956 {
1957 	struct mpi3mr_nvme_pt_sge *nvme_sgl;
1958 	__le64 sgl_dma;
1959 	u8 count;
1960 	size_t length = 0;
1961 	u16 available_sges = 0, i;
1962 	u32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge);
1963 	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
1964 	u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
1965 			    mrioc->facts.sge_mod_shift) << 32);
1966 	u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
1967 			  mrioc->facts.sge_mod_shift) << 32;
1968 	u32 size;
1969 
1970 	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
1971 	    ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
1972 
1973 	/*
1974 	 * Not all commands require a data transfer. If no data, just return
1975 	 * without constructing any sgl.
1976 	 */
1977 	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
1978 		if (drv_buf_iter->data_dir == DMA_NONE)
1979 			continue;
1980 		length = drv_buf_iter->kern_buf_len;
1981 		break;
1982 	}
1983 	if (!length || !drv_buf_iter->num_dma_desc)
1984 		return 0;
1985 
1986 	if (drv_buf_iter->num_dma_desc == 1) {
1987 		available_sges = 1;
1988 		goto build_sges;
1989 	}
1990 
1991 	sgl_dma = cpu_to_le64(mrioc->ioctl_chain_sge.dma_addr);
1992 	if (sgl_dma & sgemod_mask) {
1993 		dprint_bsg_err(mrioc,
1994 		    "%s: SGL chain address collides with SGE modifier\n",
1995 		    __func__);
1996 		return -1;
1997 	}
1998 
1999 	sgl_dma &= ~sgemod_mask;
2000 	sgl_dma |= sgemod_val;
2001 
2002 	memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
2003 	available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
2004 	if (available_sges < drv_buf_iter->num_dma_desc)
2005 		return -1;
2006 	memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
2007 	nvme_sgl->base_addr = sgl_dma;
2008 	size = drv_buf_iter->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge);
2009 	nvme_sgl->length = cpu_to_le32(size);
2010 	nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT;
2011 	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)mrioc->ioctl_chain_sge.addr;
2012 
2013 build_sges:
2014 	for (i = 0; i < drv_buf_iter->num_dma_desc; i++) {
2015 		sgl_dma = cpu_to_le64(drv_buf_iter->dma_desc[i].dma_addr);
2016 		if (sgl_dma & sgemod_mask) {
2017 			dprint_bsg_err(mrioc,
2018 				       "%s: SGL address collides with SGE modifier\n",
2019 				       __func__);
2020 			return -1;
2021 		}
2022 
2023 		sgl_dma &= ~sgemod_mask;
2024 		sgl_dma |= sgemod_val;
2025 
2026 		nvme_sgl->base_addr = sgl_dma;
2027 		nvme_sgl->length = cpu_to_le32(drv_buf_iter->dma_desc[i].size);
2028 		nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT;
2029 		nvme_sgl++;
2030 		available_sges--;
2031 	}
2032 
2033 	return 0;
2034 }
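
/*
 * SGE modifier example (illustrative values): with sge_mod_mask 0xf,
 * sge_mod_shift 28 and sge_mod_value 0x9, the computed 64 bit
 * mask/value pair is 0xf000000000000000 / 0x9000000000000000, so the
 * top nibble of every SGL address is cleared and replaced with 9
 * before the request reaches the firmware.
 */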
2035 
2036 /**
2037  * mpi3mr_build_nvme_prp - PRP constructor for NVME
2038  *			       encapsulated request
2039  * @mrioc: Adapter instance reference
2040  * @nvme_encap_request: NVMe encapsulated MPI request
2041  * @drv_bufs: DMA address of the buffers to be placed in SGL
2042  * @bufcnt: Number of DMA buffers
2043  *
2044  * This function places the DMA address of the given buffers in
2045  * proper format as PRP entries in the given NVMe encapsulated
2046  * request.
2047  *
2048  * Return: 0 on success, -1 on failure
2049  */
2050 static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
2051 	struct mpi3_nvme_encapsulated_request *nvme_encap_request,
2052 	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
2053 {
2054 	int prp_size = MPI3MR_NVME_PRP_SIZE;
2055 	__le64 *prp_entry, *prp1_entry, *prp2_entry;
2056 	__le64 *prp_page;
2057 	dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
2058 	u32 offset, entry_len, dev_pgsz;
2059 	u32 page_mask_result, page_mask;
2060 	size_t length = 0, desc_len;
2061 	u8 count;
2062 	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
2063 	u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
2064 			    mrioc->facts.sge_mod_shift) << 32);
2065 	u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
2066 			  mrioc->facts.sge_mod_shift) << 32;
2067 	u16 dev_handle = nvme_encap_request->dev_handle;
2068 	struct mpi3mr_tgt_dev *tgtdev;
2069 	u16 desc_count = 0;
2070 
2071 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2072 	if (!tgtdev) {
2073 		dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n",
2074 			__func__, dev_handle);
2075 		return -1;
2076 	}
2077 
2078 	if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
2079 		dprint_bsg_err(mrioc,
2080 		    "%s: NVMe device page size is zero for handle 0x%04x\n",
2081 		    __func__, dev_handle);
2082 		mpi3mr_tgtdev_put(tgtdev);
2083 		return -1;
2084 	}
2085 
2086 	dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
2087 	mpi3mr_tgtdev_put(tgtdev);
2088 	page_mask = dev_pgsz - 1;
2089 
2090 	if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE) {
2091 		dprint_bsg_err(mrioc,
2092 			       "%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n",
2093 			       __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle);
2094 		return -1;
2095 	}
2096 
2097 	if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz) {
2098 		dprint_bsg_err(mrioc,
2099 			       "%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n",
2100 			       __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle);
2101 		return -1;
2102 	}
2103 
2104 	/*
2105 	 * Not all commands require a data transfer. If no data, just return
2106 	 * without constructing any PRP.
2107 	 */
2108 	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
2109 		if (drv_buf_iter->data_dir == DMA_NONE)
2110 			continue;
2111 		length = drv_buf_iter->kern_buf_len;
2112 		break;
2113 	}
2114 
2115 	if (!length || !drv_buf_iter->num_dma_desc)
2116 		return 0;
2117 
2118 	for (count = 0; count < drv_buf_iter->num_dma_desc; count++) {
2119 		dma_addr = drv_buf_iter->dma_desc[count].dma_addr;
2120 		if (dma_addr & page_mask) {
2121 			dprint_bsg_err(mrioc,
2122 				       "%s: dma_addr %pad is not aligned with page size 0x%x\n",
2123 				       __func__,  &dma_addr, dev_pgsz);
2124 			return -1;
2125 		}
2126 	}
2127 
2128 	dma_addr = drv_buf_iter->dma_desc[0].dma_addr;
2129 	desc_len = drv_buf_iter->dma_desc[0].size;
2130 
2131 	mrioc->prp_sz = 0;
2132 	mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
2133 	    dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);
2134 
2135 	if (!mrioc->prp_list_virt)
2136 		return -1;
2137 	mrioc->prp_sz = dev_pgsz;
2138 
2139 	/*
2140 	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
2141 	 * PRP1 is located at a 24 byte offset from the start of the NVMe
2142 	 * command.  Then set the current PRP entry pointer to PRP1.
2143 	 */
2144 	prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
2145 	    MPI3MR_NVME_CMD_PRP1_OFFSET);
2146 	prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
2147 	    MPI3MR_NVME_CMD_PRP2_OFFSET);
2148 	prp_entry = prp1_entry;
2149 	/*
2150 	 * For the PRP entries, use the specially allocated buffer of
2151 	 * contiguous memory.
2152 	 */
2153 	prp_page = (__le64 *)mrioc->prp_list_virt;
2154 	prp_page_dma = mrioc->prp_list_dma;
2155 
2156 	/*
2157 	 * Check if we are within 1 entry of a page boundary; we don't
2158 	 * want our first entry to be a PRP List entry.
2159 	 */
2160 	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2161 	if (!page_mask_result) {
2162 		dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
2163 		    __func__);
2164 		goto err_out;
2165 	}
2166 
2167 	/*
2168 	 * Set PRP physical pointer, which initially points to the current PRP
2169 	 * DMA memory page.
2170 	 */
2171 	prp_entry_dma = prp_page_dma;
2172 
2173 
2175 	while (length) {
2176 		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2177 		if (!page_mask_result && (length > dev_pgsz)) {
2178 			dprint_bsg_err(mrioc,
2179 			    "%s: single PRP page is not sufficient\n",
2180 			    __func__);
2181 			goto err_out;
2182 		}
2183 
2184 		/* Handle the case where the entry covers only part of a page. */
2185 		offset = dma_addr & page_mask;
2186 		entry_len = dev_pgsz - offset;
2187 
2188 		if (prp_entry == prp1_entry) {
2189 			/*
2190 			 * Must fill in the first PRP pointer (PRP1) before
2191 			 * moving on.
2192 			 */
2193 			*prp1_entry = cpu_to_le64(dma_addr);
2194 			if (*prp1_entry & sgemod_mask) {
2195 				dprint_bsg_err(mrioc,
2196 				    "%s: PRP1 address collides with SGE modifier\n",
2197 				    __func__);
2198 				goto err_out;
2199 			}
2200 			*prp1_entry &= ~sgemod_mask;
2201 			*prp1_entry |= sgemod_val;
2202 
2203 			/*
2204 			 * Now point to the second PRP entry within the
2205 			 * command (PRP2).
2206 			 */
2207 			prp_entry = prp2_entry;
2208 		} else if (prp_entry == prp2_entry) {
2209 			/*
2210 			 * Should the PRP2 entry be a PRP List pointer or just
2211 			 * a regular PRP pointer?  If there is more than one
2212 			 * more page of data, must use a PRP List pointer.
2213 			 */
2214 			if (length > dev_pgsz) {
2215 				/*
2216 				 * PRP2 will contain a PRP List pointer because
2217 				 * more PRPs are needed with this command. The
2218 				 * list will start at the beginning of the
2219 				 * contiguous buffer.
2220 				 */
2221 				*prp2_entry = cpu_to_le64(prp_entry_dma);
2222 				if (*prp2_entry & sgemod_mask) {
2223 					dprint_bsg_err(mrioc,
2224 					    "%s: PRP list address collides with SGE modifier\n",
2225 					    __func__);
2226 					goto err_out;
2227 				}
2228 				*prp2_entry &= ~sgemod_mask;
2229 				*prp2_entry |= sgemod_val;
2230 
2231 				/*
2232 				 * The next PRP Entry will be the start of the
2233 				 * first PRP List.
2234 				 */
2235 				prp_entry = prp_page;
2236 				continue;
2237 			} else {
2238 				/*
2239 				 * After this, the PRP Entries are complete.
2240 				 * This command uses 2 PRP's and no PRP list.
2241 				 * This command uses 2 PRPs and no PRP list.
2242 				*prp2_entry = cpu_to_le64(dma_addr);
2243 				if (*prp2_entry & sgemod_mask) {
2244 					dprint_bsg_err(mrioc,
2245 					    "%s: PRP2 collides with SGE modifier\n",
2246 					    __func__);
2247 					goto err_out;
2248 				}
2249 				*prp2_entry &= ~sgemod_mask;
2250 				*prp2_entry |= sgemod_val;
2251 			}
2252 		} else {
2253 			/*
2254 			 * Put entry in list and bump the addresses.
2255 			 *
2256 			 * After PRP1 and PRP2 are filled in, this will fill in
2257 			 * all remaining PRP entries in a PRP List, one per
2258 			 * each time through the loop.
2259 			 */
2260 			*prp_entry = cpu_to_le64(dma_addr);
2261 			if (*prp_entry & sgemod_mask) {
2262 				dprint_bsg_err(mrioc,
2263 				    "%s: PRP address collides with SGE modifier\n",
2264 				    __func__);
2265 				goto err_out;
2266 			}
2267 			*prp_entry &= ~sgemod_mask;
2268 			*prp_entry |= sgemod_val;
2269 			prp_entry++;
2270 			prp_entry_dma += prp_size;
2271 		}
2272 
2273 		/* Decrement length, accounting for the last partial page. */
2274 		if (entry_len >= length) {
2275 			length = 0;
2276 		} else {
2277 			if (entry_len <= desc_len) {
2278 				dma_addr += entry_len;
2279 				desc_len -= entry_len;
2280 			}
2281 			if (!desc_len) {
2282 				if ((++desc_count) >=
2283 				   drv_buf_iter->num_dma_desc) {
2284 					dprint_bsg_err(mrioc,
2285 						       "%s: Invalid len %zd while building PRP\n",
2286 						       __func__, length);
2287 					goto err_out;
2288 				}
2289 				dma_addr =
2290 				    drv_buf_iter->dma_desc[desc_count].dma_addr;
2291 				desc_len =
2292 				    drv_buf_iter->dma_desc[desc_count].size;
2293 			}
2294 			length -= entry_len;
2295 		}
2296 	}
2297 
2298 	return 0;
2299 err_out:
2300 	if (mrioc->prp_list_virt) {
2301 		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
2302 		    mrioc->prp_list_virt, mrioc->prp_list_dma);
2303 		mrioc->prp_list_virt = NULL;
2304 	}
2305 	return -1;
2306 }
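
/*
 * PRP example (illustrative, assuming a 4 KiB device page): a page
 * aligned 12 KiB transfer needs three page pointers. PRP1 takes the
 * first page; since more than one page then remains, PRP2 becomes a
 * pointer to the PRP list in prp_list_virt, whose first two entries
 * carry the second and third pages.
 */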
2307 
2308 /**
2309  * mpi3mr_map_data_buffer_dma - build dma descriptors for data
2310  *                              buffers
2311  * @mrioc: Adapter instance reference
2312  * @drv_buf: buffer map descriptor
2313  * @desc_count: Number of already consumed dma descriptors
2314  *
2315  * This function computes how many pre-allocated DMA descriptors
2316  * are required for the given data buffer and, if that many
2317  * descriptors are free, sets up the mapping of the scattered
2318  * DMA addresses to the given data buffer. If the data direction
2319  * of the buffer is DMA_TO_DEVICE, the actual data is copied to
2320  * the DMA buffers.
2321  *
2322  * Return: 0 on success, -1 on failure
2323  */
2324 static int mpi3mr_map_data_buffer_dma(struct mpi3mr_ioc *mrioc,
2325 				      struct mpi3mr_buf_map *drv_buf,
2326 				      u16 desc_count)
2327 {
2328 	u16 i, needed_desc = drv_buf->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE;
2329 	u32 buf_len = drv_buf->kern_buf_len, copied_len = 0;
2330 
2331 	if (drv_buf->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE)
2332 		needed_desc++;
2333 	if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) {
2334 		dprint_bsg_err(mrioc, "%s: DMA descriptor mapping error %d:%d:%d\n",
2335 			       __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
2336 		return -1;
2337 	}
2338 	drv_buf->dma_desc = kzalloc(sizeof(*drv_buf->dma_desc) * needed_desc,
2339 				    GFP_KERNEL);
2340 	if (!drv_buf->dma_desc)
2341 		return -1;
2342 	for (i = 0; i < needed_desc; i++, desc_count++) {
2343 		drv_buf->dma_desc[i].addr = mrioc->ioctl_sge[desc_count].addr;
2344 		drv_buf->dma_desc[i].dma_addr =
2345 		    mrioc->ioctl_sge[desc_count].dma_addr;
2346 		if (buf_len < mrioc->ioctl_sge[desc_count].size)
2347 			drv_buf->dma_desc[i].size = buf_len;
2348 		else
2349 			drv_buf->dma_desc[i].size =
2350 			    mrioc->ioctl_sge[desc_count].size;
2351 		buf_len -= drv_buf->dma_desc[i].size;
2352 		memset(drv_buf->dma_desc[i].addr, 0,
2353 		       mrioc->ioctl_sge[desc_count].size);
2354 		if (drv_buf->data_dir == DMA_TO_DEVICE) {
2355 			memcpy(drv_buf->dma_desc[i].addr,
2356 			       drv_buf->bsg_buf + copied_len,
2357 			       drv_buf->dma_desc[i].size);
2358 			copied_len += drv_buf->dma_desc[i].size;
2359 		}
2360 	}
2361 	drv_buf->num_dma_desc = needed_desc;
2362 	return 0;
2363 }
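
/*
 * Descriptor count example (illustrative, assuming an 8 KiB
 * MPI3MR_IOCTL_SGE_SIZE): a 20 KiB buffer needs two full descriptors
 * plus one for the 4 KiB remainder, so needed_desc == 3, with each
 * entry mapped onto one pre-allocated ioctl_sge segment.
 */
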
2364 /**
2365  * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
2366  * @job: BSG job reference
2367  *
2368  * This function is the top level handler for MPI pass through
2369  * commands; it does basic validation of the input data buffers,
2370  * identifies the given buffer types and MPI command, allocates
2371  * DMAable memory for the user given buffers, constructs the SGLs
2372  * properly and passes the command to the firmware.
2373  *
2374  * Once the MPI command is completed, the driver copies the data,
2375  * if any, along with the reply and sense information, to the user
2376  * provided buffers. If the command times out, a controller reset
2377  * is issued prior to returning.
2378  *
2379  * Return: 0 on success and proper error codes on failure
2380  */
2382 static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
2383 {
2384 	long rval = -EINVAL;
2385 	struct mpi3mr_ioc *mrioc = NULL;
2386 	u8 *mpi_req = NULL, *sense_buff_k = NULL;
2387 	u8 mpi_msg_size = 0;
2388 	struct mpi3mr_bsg_packet *bsg_req = NULL;
2389 	struct mpi3mr_bsg_mptcmd *karg;
2390 	struct mpi3mr_buf_entry *buf_entries = NULL;
2391 	struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
2392 	u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0;
2393 	u8 din_cnt = 0, dout_cnt = 0;
2394 	u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF;
2395 	u8 block_io = 0, nvme_fmt = 0, resp_code = 0;
2396 	struct mpi3_request_header *mpi_header = NULL;
2397 	struct mpi3_status_reply_descriptor *status_desc;
2398 	struct mpi3_scsi_task_mgmt_request *tm_req;
2399 	u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
2400 	u16 dev_handle;
2401 	struct mpi3mr_tgt_dev *tgtdev;
2402 	struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
2403 	struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
2404 	u32 din_size = 0, dout_size = 0;
2405 	u8 *din_buf = NULL, *dout_buf = NULL;
2406 	u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
2407 	u16 rmc_size = 0, desc_count = 0;
2408 
2409 	bsg_req = job->request;
2410 	karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;
2411 
2412 	mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
2413 	if (!mrioc)
2414 		return -ENODEV;
2415 
2416 	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
2417 		return -ERESTARTSYS;
2418 
2419 	if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
2420 		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
2421 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2422 		return -EAGAIN;
2423 	}
2424 
2425 	if (!mrioc->ioctl_sges_allocated) {
2426 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2427 		dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
2428 			       __func__);
2429 		return -ENOMEM;
2430 	}
2431 
2432 	if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
2433 		karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;
2434 
2435 	mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
2436 	if (!mpi_req) {
2437 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2438 		return -ENOMEM;
2439 	}
2440 	mpi_header = (struct mpi3_request_header *)mpi_req;
2441 
2442 	bufcnt = karg->buf_entry_list.num_of_entries;
2443 	drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
2444 	if (!drv_bufs) {
2445 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2446 		rval = -ENOMEM;
2447 		goto out;
2448 	}
2449 
2450 	dout_buf = kzalloc(job->request_payload.payload_len,
2451 				      GFP_KERNEL);
2452 	if (!dout_buf) {
2453 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2454 		rval = -ENOMEM;
2455 		goto out;
2456 	}
2457 
2458 	din_buf = kzalloc(job->reply_payload.payload_len,
2459 				     GFP_KERNEL);
2460 	if (!din_buf) {
2461 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2462 		rval = -ENOMEM;
2463 		goto out;
2464 	}
2465 
2466 	sg_copy_to_buffer(job->request_payload.sg_list,
2467 			  job->request_payload.sg_cnt,
2468 			  dout_buf, job->request_payload.payload_len);
2469 
2470 	buf_entries = karg->buf_entry_list.buf_entry;
2471 	sgl_din_iter = din_buf;
2472 	sgl_dout_iter = dout_buf;
2473 	drv_buf_iter = drv_bufs;
2474 
2475 	for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
2476 
2477 		switch (buf_entries->buf_type) {
2478 		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
2479 			sgl_iter = sgl_dout_iter;
2480 			sgl_dout_iter += buf_entries->buf_len;
2481 			drv_buf_iter->data_dir = DMA_TO_DEVICE;
2482 			is_rmcb = 1;
2483 			if ((count != 0) || !buf_entries->buf_len)
2484 				invalid_be = 1;
2485 			break;
2486 		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
2487 			sgl_iter = sgl_din_iter;
2488 			sgl_din_iter += buf_entries->buf_len;
2489 			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
2490 			is_rmrb = 1;
2491 			if (count != 1 || !is_rmcb || !buf_entries->buf_len)
2492 				invalid_be = 1;
2493 			break;
2494 		case MPI3MR_BSG_BUFTYPE_DATA_IN:
2495 			sgl_iter = sgl_din_iter;
2496 			sgl_din_iter += buf_entries->buf_len;
2497 			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
2498 			din_cnt++;
2499 			din_size += buf_entries->buf_len;
2500 			if ((din_cnt > 1) && !is_rmcb)
2501 				invalid_be = 1;
2502 			break;
2503 		case MPI3MR_BSG_BUFTYPE_DATA_OUT:
2504 			sgl_iter = sgl_dout_iter;
2505 			sgl_dout_iter += buf_entries->buf_len;
2506 			drv_buf_iter->data_dir = DMA_TO_DEVICE;
2507 			dout_cnt++;
2508 			dout_size += buf_entries->buf_len;
2509 			if ((dout_cnt > 1) && !is_rmcb)
2510 				invalid_be = 1;
2511 			break;
2512 		case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
2513 			sgl_iter = sgl_din_iter;
2514 			sgl_din_iter += buf_entries->buf_len;
2515 			drv_buf_iter->data_dir = DMA_NONE;
2516 			mpirep_offset = count;
2517 			if (!buf_entries->buf_len)
2518 				invalid_be = 1;
2519 			break;
2520 		case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
2521 			sgl_iter = sgl_din_iter;
2522 			sgl_din_iter += buf_entries->buf_len;
2523 			drv_buf_iter->data_dir = DMA_NONE;
2524 			erb_offset = count;
2525 			if (!buf_entries->buf_len)
2526 				invalid_be = 1;
2527 			break;
2528 		case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
2529 			sgl_iter = sgl_dout_iter;
2530 			sgl_dout_iter += buf_entries->buf_len;
2531 			drv_buf_iter->data_dir = DMA_NONE;
2532 			mpi_msg_size = buf_entries->buf_len;
2533 			if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
2534 					(mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
2535 				dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
2536 					__func__);
2537 				mutex_unlock(&mrioc->bsg_cmds.mutex);
2538 				rval = -EINVAL;
2539 				goto out;
2540 			}
2541 			memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
2542 			break;
2543 		default:
2544 			invalid_be = 1;
2545 			break;
2546 		}
2547 		if (invalid_be) {
2548 			dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
2549 				__func__);
2550 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2551 			rval = -EINVAL;
2552 			goto out;
2553 		}
2554 
2555 		if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
2556 			dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
2557 				       __func__);
2558 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2559 			rval = -EINVAL;
2560 			goto out;
2561 		}
2562 		if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
2563 			dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
2564 				       __func__);
2565 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2566 			rval = -EINVAL;
2567 			goto out;
2568 		}
2569 
2570 		drv_buf_iter->bsg_buf = sgl_iter;
2571 		drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
2572 	}
2573 
2574 	if (is_rmcb && ((din_size + dout_size) > MPI3MR_MAX_APP_XFER_SIZE)) {
2575 		dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n",
2576 			       __func__, __LINE__, mpi_header->function, din_size,
2577 			       dout_size);
2578 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2579 		rval = -EINVAL;
2580 		goto out;
2581 	}
2582 
2583 	if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
2584 		dprint_bsg_err(mrioc,
2585 		    "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
2586 		    __func__, __LINE__, mpi_header->function, din_size);
2587 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2588 		rval = -EINVAL;
2589 		goto out;
2590 	}
2591 	if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
2592 		dprint_bsg_err(mrioc,
2593 		    "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
2594 		    __func__, __LINE__, mpi_header->function, dout_size);
2595 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2596 		rval = -EINVAL;
2597 		goto out;
2598 	}
2599 
2600 	if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
2601 		if (din_size > MPI3MR_IOCTL_SGE_SIZE ||
2602 		    dout_size > MPI3MR_IOCTL_SGE_SIZE) {
2603 			dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n",
2604 				       __func__, __LINE__, din_cnt, dout_cnt, din_size,
2605 			    dout_size);
2606 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2607 			rval = -EINVAL;
2608 			goto out;
2609 		}
2610 	}
2611 
2612 	drv_buf_iter = drv_bufs;
2613 	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
2614 		if (drv_buf_iter->data_dir == DMA_NONE)
2615 			continue;
2616 
2617 		drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
2618 		if (is_rmcb && !count) {
2619 			drv_buf_iter->kern_buf_len =
2620 			    mrioc->ioctl_chain_sge.size;
2621 			drv_buf_iter->kern_buf =
2622 			    mrioc->ioctl_chain_sge.addr;
2623 			drv_buf_iter->kern_buf_dma =
2624 			    mrioc->ioctl_chain_sge.dma_addr;
2625 			drv_buf_iter->dma_desc = NULL;
2626 			drv_buf_iter->num_dma_desc = 0;
2627 			memset(drv_buf_iter->kern_buf, 0,
2628 			       drv_buf_iter->kern_buf_len);
2629 			tmplen = min(drv_buf_iter->kern_buf_len,
2630 				     drv_buf_iter->bsg_buf_len);
2631 			rmc_size = tmplen;
2632 			memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
2633 		} else if (is_rmrb && (count == 1)) {
2634 			drv_buf_iter->kern_buf_len =
2635 			    mrioc->ioctl_resp_sge.size;
2636 			drv_buf_iter->kern_buf =
2637 			    mrioc->ioctl_resp_sge.addr;
2638 			drv_buf_iter->kern_buf_dma =
2639 			    mrioc->ioctl_resp_sge.dma_addr;
2640 			drv_buf_iter->dma_desc = NULL;
2641 			drv_buf_iter->num_dma_desc = 0;
2642 			memset(drv_buf_iter->kern_buf, 0,
2643 			       drv_buf_iter->kern_buf_len);
2644 			tmplen = min(drv_buf_iter->kern_buf_len,
2645 				     drv_buf_iter->bsg_buf_len);
2646 			drv_buf_iter->kern_buf_len = tmplen;
2647 			memset(drv_buf_iter->bsg_buf, 0,
2648 			       drv_buf_iter->bsg_buf_len);
2649 		} else {
2650 			if (!drv_buf_iter->kern_buf_len)
2651 				continue;
2652 			if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) {
2653 				rval = -ENOMEM;
2654 				mutex_unlock(&mrioc->bsg_cmds.mutex);
2655 				dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n",
2656 					       __func__, __LINE__);
2657 				goto out;
2658 			}
2659 			desc_count += drv_buf_iter->num_dma_desc;
2660 		}
2661 	}
2662 
2663 	if (erb_offset != 0xFF) {
2664 		sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
2665 		if (!sense_buff_k) {
2666 			rval = -ENOMEM;
2667 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2668 			goto out;
2669 		}
2670 	}
2671 
2672 	if (mrioc->unrecoverable) {
2673 		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
2674 		    __func__);
2675 		rval = -EFAULT;
2676 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2677 		goto out;
2678 	}
2679 	if (mrioc->reset_in_progress) {
2680 		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
2681 		rval = -EAGAIN;
2682 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2683 		goto out;
2684 	}
2685 	if (mrioc->stop_bsgs || mrioc->block_on_pci_err) {
2686 		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
2687 		rval = -EAGAIN;
2688 		mutex_unlock(&mrioc->bsg_cmds.mutex);
2689 		goto out;
2690 	}
2691 
2692 	if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
2693 		nvme_fmt = mpi3mr_get_nvme_data_fmt(
2694 			(struct mpi3_nvme_encapsulated_request *)mpi_req);
2695 		if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
2696 			if (mpi3mr_build_nvme_prp(mrioc,
2697 			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
2698 			    drv_bufs, bufcnt)) {
2699 				rval = -ENOMEM;
2700 				mutex_unlock(&mrioc->bsg_cmds.mutex);
2701 				goto out;
2702 			}
2703 		} else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
2704 			nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
2705 			if (mpi3mr_build_nvme_sgl(mrioc,
2706 			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
2707 			    drv_bufs, bufcnt)) {
2708 				rval = -EINVAL;
2709 				mutex_unlock(&mrioc->bsg_cmds.mutex);
2710 				goto out;
2711 			}
2712 		} else {
2713 			dprint_bsg_err(mrioc,
2714 			    "%s: invalid NVMe command format\n", __func__);
2715 			rval = -EINVAL;
2716 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2717 			goto out;
2718 		}
2719 	} else {
2720 		if (mpi3mr_bsg_build_sgl(mrioc, mpi_req, mpi_msg_size,
2721 					 drv_bufs, bufcnt, is_rmcb, is_rmrb,
2722 					 (dout_cnt + din_cnt))) {
2723 			dprint_bsg_err(mrioc, "%s: sgl build failed\n", __func__);
2724 			rval = -EAGAIN;
2725 			mutex_unlock(&mrioc->bsg_cmds.mutex);
2726 			goto out;
2727 		}
2728 	}
2729 
2730 	if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
2731 		tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
2732 		if (tm_req->task_type !=
2733 		    MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
2734 			dev_handle = tm_req->dev_handle;
2735 			block_io = 1;
2736 		}
2737 	}
2738 	if (block_io) {
2739 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2740 		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
2741 			stgt_priv = (struct mpi3mr_stgt_priv_data *)
2742 			    tgtdev->starget->hostdata;
2743 			atomic_inc(&stgt_priv->block_io);
2744 			mpi3mr_tgtdev_put(tgtdev);
2745 		}
2746 	}
2747 
2748 	mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
2749 	mrioc->bsg_cmds.is_waiting = 1;
2750 	mrioc->bsg_cmds.callback = NULL;
2751 	mrioc->bsg_cmds.is_sense = 0;
2752 	mrioc->bsg_cmds.sensebuf = sense_buff_k;
2753 	memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
2754 	mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
2755 	if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
2756 		dprint_bsg_info(mrioc,
2757 		    "%s: posting bsg request to the controller\n", __func__);
2758 		dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
2759 		    "bsg_mpi3_req");
2760 		if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
2761 			drv_buf_iter = &drv_bufs[0];
2762 			dprint_dump(drv_buf_iter->kern_buf,
2763 			    rmc_size, "mpi3_mgmt_req");
2764 		}
2765 	}
2766 
2767 	init_completion(&mrioc->bsg_cmds.done);
2768 	rval = mpi3mr_admin_request_post(mrioc, mpi_req,
2769 	    MPI3MR_ADMIN_REQ_FRAME_SZ, 0);
2770 
2772 	if (rval) {
2773 		mrioc->bsg_cmds.is_waiting = 0;
2774 		dprint_bsg_err(mrioc,
2775 		    "%s: posting bsg request failed\n", __func__);
2776 		rval = -EAGAIN;
2777 		goto out_unlock;
2778 	}
2779 	wait_for_completion_timeout(&mrioc->bsg_cmds.done,
2780 	    (karg->timeout * HZ));
2781 	if (block_io && stgt_priv)
2782 		atomic_dec(&stgt_priv->block_io);
2783 	if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
2784 		mrioc->bsg_cmds.is_waiting = 0;
2785 		rval = -EAGAIN;
2786 		if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
2787 			goto out_unlock;
2788 		if (((mpi_header->function != MPI3_FUNCTION_SCSI_IO) &&
2789 		    (mpi_header->function != MPI3_FUNCTION_NVME_ENCAPSULATED))
2790 		    || (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR)) {
2791 			ioc_info(mrioc, "%s: bsg request timed out after %d seconds\n",
2792 			    __func__, karg->timeout);
2793 			if (!(mrioc->logging_level & MPI3_DEBUG_BSG_INFO)) {
2794 				dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
2795 					    "bsg_mpi3_req");
2796 				if (mpi_header->function ==
2797 				    MPI3_FUNCTION_MGMT_PASSTHROUGH) {
2798 					drv_buf_iter = &drv_bufs[0];
2799 					dprint_dump(drv_buf_iter->kern_buf,
2800 						    rmc_size, "mpi3_mgmt_req");
2801 				}
2802 			}
2803 		}
2804 		if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
2805 			(mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) {
2806 			dprint_bsg_err(mrioc, "%s: bsg request timed out after %d seconds,\n"
2807 				"issuing target reset to (0x%04x)\n", __func__,
2808 				karg->timeout, mpi_header->function_dependent);
2809 			mpi3mr_issue_tm(mrioc,
2810 			    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
2811 			    mpi_header->function_dependent, 0,
2812 			    MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
2813 			    &mrioc->host_tm_cmds, &resp_code, NULL);
2814 		}
2815 		if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
2816 		    !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
2817 			mpi3mr_soft_reset_handler(mrioc,
2818 			    MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
2819 		goto out_unlock;
2820 	}
2821 	dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__);
2822 
2823 	if (mrioc->prp_list_virt) {
2824 		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
2825 		    mrioc->prp_list_virt, mrioc->prp_list_dma);
2826 		mrioc->prp_list_virt = NULL;
2827 	}
2828 
2829 	if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2830 	     != MPI3_IOCSTATUS_SUCCESS) {
2831 		dprint_bsg_info(mrioc,
2832 		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
2833 		    __func__,
2834 		    (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2835 		    mrioc->bsg_cmds.ioc_loginfo);
2836 	}
2837 
2838 	if ((mpirep_offset != 0xFF) &&
2839 	    drv_bufs[mpirep_offset].bsg_buf_len) {
2840 		drv_buf_iter = &drv_bufs[mpirep_offset];
2841 		drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) +
2842 					   mrioc->reply_sz);
2843 		bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
2844 
2845 		if (!bsg_reply_buf) {
2846 			rval = -ENOMEM;
2847 			goto out_unlock;
2848 		}
2849 		if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
2850 			bsg_reply_buf->mpi_reply_type =
2851 				MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
2852 			memcpy(bsg_reply_buf->reply_buf,
2853 			    mrioc->bsg_cmds.reply, mrioc->reply_sz);
2854 		} else {
2855 			bsg_reply_buf->mpi_reply_type =
2856 				MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
2857 			status_desc = (struct mpi3_status_reply_descriptor *)
2858 			    bsg_reply_buf->reply_buf;
2859 			status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
2860 			status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
2861 		}
2862 		tmplen = min(drv_buf_iter->kern_buf_len,
2863 			drv_buf_iter->bsg_buf_len);
2864 		memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
2865 	}
2866 
2867 	if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
2868 	    mrioc->bsg_cmds.is_sense) {
2869 		drv_buf_iter = &drv_bufs[erb_offset];
2870 		tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
2871 		memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
2872 	}
2873 
2874 	drv_buf_iter = drv_bufs;
2875 	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
2876 		if (drv_buf_iter->data_dir == DMA_NONE)
2877 			continue;
2878 		if ((count == 1) && is_rmrb) {
2879 			memcpy(drv_buf_iter->bsg_buf,
2880 			    drv_buf_iter->kern_buf,
2881 			    drv_buf_iter->kern_buf_len);
2882 		} else if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
2883 			tmplen = 0;
2884 			for (desc_count = 0;
2885 			    desc_count < drv_buf_iter->num_dma_desc;
2886 			    desc_count++) {
2887 				memcpy(((u8 *)drv_buf_iter->bsg_buf + tmplen),
2888 				       drv_buf_iter->dma_desc[desc_count].addr,
2889 				       drv_buf_iter->dma_desc[desc_count].size);
2890 				tmplen +=
2891 				    drv_buf_iter->dma_desc[desc_count].size;
2892 			}
2893 		}
2894 	}
2895 
2896 out_unlock:
2897 	if (din_buf) {
2898 		job->reply_payload_rcv_len =
2899 			sg_copy_from_buffer(job->reply_payload.sg_list,
2900 					    job->reply_payload.sg_cnt,
2901 					    din_buf, job->reply_payload.payload_len);
2902 	}
2903 	mrioc->bsg_cmds.is_sense = 0;
2904 	mrioc->bsg_cmds.sensebuf = NULL;
2905 	mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
2906 	mutex_unlock(&mrioc->bsg_cmds.mutex);
2907 out:
2908 	kfree(sense_buff_k);
2909 	kfree(dout_buf);
2910 	kfree(din_buf);
2911 	kfree(mpi_req);
2912 	if (drv_bufs) {
2913 		drv_buf_iter = drv_bufs;
2914 		for (count = 0; count < bufcnt; count++, drv_buf_iter++)
2915 			kfree(drv_buf_iter->dma_desc);
2916 		kfree(drv_bufs);
2917 	}
2918 	kfree(bsg_reply_buf);
2919 	return rval;
2920 }
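
/*
 * Buffer ordering note (derived from the checks above): a management
 * passthrough request must place its RAIDMGMT_CMD buffer at index 0 of
 * the buf_entry list and any RAIDMGMT_RESP buffer at index 1; any
 * other ordering sets invalid_be and the job fails with -EINVAL.
 */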
2921 
2922 /**
2923  * mpi3mr_app_save_logdata - Save Log Data events
2924  * @mrioc: Adapter instance reference
2925  * @event_data: event data associated with log data event
2926  * @event_data_size: event data size to copy
2927  *
2928  * If log data event caching is enabled by the applications,
2929  * this function saves the log data in the circular queue
2930  * and sends the async signal SIGIO to notify the event monitoring
2931  * applications that there is an async event from the firmware.
2932  *
2933  * Return: Nothing
2934  */
2935 void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
2936 	u16 event_data_size)
2937 {
2938 	u32 index = mrioc->logdata_buf_idx, sz;
2939 	struct mpi3mr_logdata_entry *entry;
2940 
2941 	if (!(mrioc->logdata_buf))
2942 		return;
2943 
2944 	entry = (struct mpi3mr_logdata_entry *)
2945 		(mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
2946 	entry->valid_entry = 1;
2947 	sz = min(mrioc->logdata_entry_sz, event_data_size);
2948 	memcpy(entry->data, event_data, sz);
2949 	mrioc->logdata_buf_idx =
2950 		((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
2951 	atomic64_inc(&event_counter);
2952 }
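
/*
 * The cache is a fixed size ring: the modulo above wraps
 * logdata_buf_idx back to slot 0 once it reaches
 * MPI3MR_BSG_LOGDATA_MAX_ENTRIES, so the oldest cached event is the
 * next one to be overwritten.
 */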
2953 
2954 /**
2955  * mpi3mr_bsg_request - bsg request entry point
2956  * @job: BSG job reference
2957  *
2958  * This is the driver's entry point for bsg requests.
2959  *
2960  * Return: 0 on success and proper error codes on failure
2961  */
2962 static int mpi3mr_bsg_request(struct bsg_job *job)
2963 {
2964 	long rval = -EINVAL;
2965 	unsigned int reply_payload_rcv_len = 0;
2966 
2967 	struct mpi3mr_bsg_packet *bsg_req = job->request;
2968 
2969 	switch (bsg_req->cmd_type) {
2970 	case MPI3MR_DRV_CMD:
2971 		rval = mpi3mr_bsg_process_drv_cmds(job);
2972 		break;
2973 	case MPI3MR_MPT_CMD:
2974 		rval = mpi3mr_bsg_process_mpt_cmds(job);
2975 		break;
2976 	default:
2977 		pr_err("%s: unsupported BSG command(0x%08x)\n",
2978 		    MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
2979 		break;
2980 	}
2981 
2982 	bsg_job_done(job, rval, reply_payload_rcv_len);
2983 
2984 	return 0;
2985 }
2986 
2987 /**
2988  * mpi3mr_bsg_exit - de-registration from bsg layer
2989  * @mrioc: Adapter instance reference
2990  *
2991  * This will be called during driver unload and all
2992  * bsg resources allocated during load will be freed.
2993  *
2994  * Return: Nothing
2995  */
2996 void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
2997 {
2998 	struct device *bsg_dev = &mrioc->bsg_dev;

2999 	if (!mrioc->bsg_queue)
3000 		return;
3001 
3002 	bsg_remove_queue(mrioc->bsg_queue);
3003 	mrioc->bsg_queue = NULL;
3004 
3005 	device_del(bsg_dev);
3006 	put_device(bsg_dev);
3007 }
3008 
3009 /**
3010  * mpi3mr_bsg_node_release - release bsg device node
3011  * @dev: bsg device node
3012  *
3013  * Decrements the bsg device's parent reference count.
3014  *
3015  * Return: Nothing
3016  */
3017 static void mpi3mr_bsg_node_release(struct device *dev)
3018 {
3019 	put_device(dev->parent);
3020 }
3021 
3022 /**
3023  * mpi3mr_bsg_init -  registration with bsg layer
3024  * @mrioc: Adapter instance reference
3025  *
3026  * This will be called during driver load and it will
3027  * register the driver with the bsg layer.
3028  *
3029  * Return: Nothing
3030  */
3031 void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
3032 {
3033 	struct device *bsg_dev = &mrioc->bsg_dev;
3034 	struct device *parent = &mrioc->shost->shost_gendev;
3035 	struct queue_limits lim = {
3036 		.max_hw_sectors		= MPI3MR_MAX_APP_XFER_SECTORS,
3037 		.max_segments		= MPI3MR_MAX_APP_XFER_SEGMENTS,
3038 	};
3039 	struct request_queue *q;
3040 
3041 	device_initialize(bsg_dev);
3042 
3043 	bsg_dev->parent = get_device(parent);
3044 	bsg_dev->release = mpi3mr_bsg_node_release;
3045 
3046 	dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);
3047 
3048 	if (device_add(bsg_dev)) {
3049 		ioc_err(mrioc, "%s: bsg device add failed\n",
3050 		    dev_name(bsg_dev));
3051 		put_device(bsg_dev);
3052 		return;
3053 	}
3054 
3055 	q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
3056 			mpi3mr_bsg_request, NULL, 0);
3057 	if (IS_ERR(q)) {
3058 		ioc_err(mrioc, "%s: bsg registration failed\n",
3059 		    dev_name(bsg_dev));
3060 		device_del(bsg_dev);
3061 		put_device(bsg_dev);
3062 		return;
3063 	}
3064 
3065 	mrioc->bsg_queue = q;
3066 }
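
/*
 * The node registered above appears as /dev/bsg/mpi3mrctl<id> (for
 * example /dev/bsg/mpi3mrctl0 for the first adapter); this is the
 * device that management applications open to issue the BSG requests
 * handled earlier in this file.
 */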
3067 
3068 /**
3069  * version_fw_show - SysFS callback for firmware version read
3070  * @dev: class device
3071  * @attr: Device attributes
3072  * @buf: Buffer to copy
3073  *
3074  * Return: sysfs_emit() return after copying firmware version
3075  */
3076 static ssize_t
3077 version_fw_show(struct device *dev, struct device_attribute *attr,
3078 	char *buf)
3079 {
3080 	struct Scsi_Host *shost = class_to_shost(dev);
3081 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3082 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3083 
3084 	return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
3085 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3086 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
3087 }
3088 static DEVICE_ATTR_RO(version_fw);
3089 
3090 /**
3091  * fw_queue_depth_show - SysFS callback for firmware max cmds
3092  * @dev: class device
3093  * @attr: Device attributes
3094  * @buf: Buffer to copy
3095  *
3096  * Return: sysfs_emit() return after copying firmware max commands
3097  */
3098 static ssize_t
3099 fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
3100 			char *buf)
3101 {
3102 	struct Scsi_Host *shost = class_to_shost(dev);
3103 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3104 
3105 	return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
3106 }
3107 static DEVICE_ATTR_RO(fw_queue_depth);
3108 
3109 /**
3110  * op_req_q_count_show - SysFS callback for request queue count
3111  * @dev: class device
3112  * @attr: Device attributes
3113  * @buf: Buffer to copy
3114  *
3115  * Return: sysfs_emit() return after copying request queue count
3116  */
3117 static ssize_t
3118 op_req_q_count_show(struct device *dev, struct device_attribute *attr,
3119 			char *buf)
3120 {
3121 	struct Scsi_Host *shost = class_to_shost(dev);
3122 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3123 
3124 	return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
3125 }
3126 static DEVICE_ATTR_RO(op_req_q_count);
3127 
3128 /**
3129  * reply_queue_count_show - SysFS callback for reply queue count
3130  * @dev: class device
3131  * @attr: Device attributes
3132  * @buf: Buffer to copy
3133  *
3134  * Return: sysfs_emit() return after copying reply queue count
3135  */
3136 static ssize_t
3137 reply_queue_count_show(struct device *dev, struct device_attribute *attr,
3138 			char *buf)
3139 {
3140 	struct Scsi_Host *shost = class_to_shost(dev);
3141 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3142 
3143 	return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
3144 }
3145 
3146 static DEVICE_ATTR_RO(reply_queue_count);
3147 
3148 /**
3149  * reply_qfull_count_show - Show reply qfull count
3150  * @dev: class device
3151  * @attr: Device attributes
3152  * @buf: Buffer to copy
3153  *
3154  * Retrieves the current value of the reply_qfull_count from the mrioc structure and
3155  * formats it as a string for display.
3156  *
3157  * Return: sysfs_emit() return
3158  */
3159 static ssize_t
3160 reply_qfull_count_show(struct device *dev, struct device_attribute *attr,
3161 			char *buf)
3162 {
3163 	struct Scsi_Host *shost = class_to_shost(dev);
3164 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3165 
3166 	return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count));
3167 }
3168 
3169 static DEVICE_ATTR_RO(reply_qfull_count);

/**
 * logging_level_show - Show controller debug level
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * A sysfs 'read/write' shost attribute, to show the current
 * debug log level used by the driver for the specific
 * controller.
 *
 * Return: sysfs_emit() return
 */
static ssize_t
logging_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
}

/**
 * logging_level_store - Change controller debug level
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 * @count: size of the buffer
 *
 * A sysfs 'read/write' shost attribute, to change the current
 * debug log level used by the driver for the specific
 * controller.
 *
 * Return: strlen() of @buf on success, -EINVAL on invalid input
 */
static ssize_t
logging_level_store(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int val = 0;

	if (kstrtoint(buf, 0, &val) != 0)
		return -EINVAL;

	mrioc->logging_level = val;
	ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
	return strlen(buf);
}
static DEVICE_ATTR_RW(logging_level);
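
/*
 * Example (illustrative, not part of the driver): because
 * logging_level_store() parses with kstrtoint(..., 0, ...), the value
 * may be written in decimal, hex ("0x" prefix) or octal ("0" prefix).
 * A minimal userspace sketch, assuming host number 0 and a hypothetical
 * debug bitmask of 0x1f:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/scsi_host/host0/logging_level", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("0x1f", f);
 *		return fclose(f) ? 1 : 0;
 *	}
 *
 * Reading the attribute back reports the mask in the "%08xh" form used
 * by logging_level_show(), e.g. "0000001fh".
 */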

/**
 * adp_state_show() - SysFS callback for adapter state show
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying adapter state
 */
static ssize_t
adp_state_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	enum mpi3mr_iocstate ioc_state;
	uint8_t adp_state;

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
		adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
	else if (mrioc->reset_in_progress || mrioc->stop_bsgs ||
		 mrioc->block_on_pci_err)
		adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
	else if (ioc_state == MRIOC_STATE_FAULT)
		adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
	else
		adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;

	return sysfs_emit(buf, "%u\n", adp_state);
}

static DEVICE_ATTR_RO(adp_state);
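
/*
 * Note (illustrative): the value emitted above is one of the numeric
 * MPI3MR_BSG_ADPSTATE_* constants from uapi/scsi/scsi_bsg_mpi3mr.h,
 * i.e. the same encoding the BSG management interface reports. A
 * userspace consumer might decode it along these lines (a sketch, not
 * part of the driver; assumes the installed uapi header):
 *
 *	#include <scsi/scsi_bsg_mpi3mr.h>
 *
 *	static const char *adp_state_name(unsigned int st)
 *	{
 *		switch (st) {
 *		case MPI3MR_BSG_ADPSTATE_OPERATIONAL:	return "operational";
 *		case MPI3MR_BSG_ADPSTATE_FAULT:		return "fault";
 *		case MPI3MR_BSG_ADPSTATE_IN_RESET:	return "in reset";
 *		case MPI3MR_BSG_ADPSTATE_UNRECOVERABLE: return "unrecoverable";
 *		default:				return "unknown";
 *		}
 *	}
 */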

static struct attribute *mpi3mr_host_attrs[] = {
	&dev_attr_version_fw.attr,
	&dev_attr_fw_queue_depth.attr,
	&dev_attr_op_req_q_count.attr,
	&dev_attr_reply_queue_count.attr,
	&dev_attr_reply_qfull_count.attr,
	&dev_attr_logging_level.attr,
	&dev_attr_adp_state.attr,
	NULL,
};

static const struct attribute_group mpi3mr_host_attr_group = {
	.attrs = mpi3mr_host_attrs
};

const struct attribute_group *mpi3mr_host_groups[] = {
	&mpi3mr_host_attr_group,
	NULL,
};
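
/*
 * Note: mpi3mr_host_groups is not consumed in this file; it is wired
 * into the driver's struct scsi_host_template (in mpi3mr_os.c) so the
 * SCSI midlayer creates the files above for every Scsi_Host. A minimal
 * sketch of that wiring, under the assumption that the template follows
 * the usual pattern:
 *
 *	static struct scsi_host_template mpi3mr_driver_template = {
 *		...
 *		.shost_groups	= mpi3mr_host_groups,
 *	};
 */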

/*
 * SCSI Device attributes under sysfs
 */

/**
 * sas_address_show - SysFS callback for dev SAS address display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying SAS address of the
 * specific SAS/SATA end device.
 */
static ssize_t
sas_address_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
		return 0;
	return sysfs_emit(buf, "0x%016llx\n",
	    (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
}

static DEVICE_ATTR_RO(sas_address);
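
/*
 * Example (illustrative): device attributes such as sas_address appear
 * under each SCSI device exposed by the host, e.g. (hypothetical
 * H:C:T:L address):
 *
 *	/sys/class/scsi_device/16:0:1:0/device/sas_address
 *
 * Note that sas_address_show() deliberately emits nothing (returns 0)
 * instead of failing when the target is not a SAS/SATA end device.
 */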

/**
 * device_handle_show - SysFS callback for device handle display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware internal
 * device handle of the specific device.
 */
static ssize_t
device_handle_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev)
		return 0;
	return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
}

static DEVICE_ATTR_RO(device_handle);

/**
 * persistent_id_show - SysFS callback for persistent ID display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying the persistent ID
 * of the specific device.
 */
static ssize_t
persistent_id_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev)
		return 0;
	return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
}
static DEVICE_ATTR_RO(persistent_id);

/**
 * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
 * @dev: pointer to embedded device
 * @attr: sas_ncq_prio_supported attribute descriptor
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' sdev attribute, only works with SATA devices
 *
 * Return: the number of characters written to @buf
 */
static ssize_t
sas_ncq_prio_supported_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
}
static DEVICE_ATTR_RO(sas_ncq_prio_supported);

/**
 * sas_ncq_prio_enable_show - Show whether prioritized I/O is enabled
 * @dev: pointer to embedded device
 * @attr: sas_ncq_prio_enable attribute descriptor
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' sdev attribute, only works with SATA devices
 *
 * Return: the number of characters written to @buf
 */
static ssize_t
sas_ncq_prio_enable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;

	if (!sdev_priv_data)
		return 0;

	return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
}

static ssize_t
sas_ncq_prio_enable_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	bool ncq_prio_enable = false;

	/* Guard against a device without driver private data, as the
	 * show callback above does, before dereferencing it below.
	 */
	if (!sdev_priv_data)
		return -ENODEV;

	if (kstrtobool(buf, &ncq_prio_enable))
		return -EINVAL;

	if (!sas_ata_ncq_prio_supported(sdev))
		return -EINVAL;

	sdev_priv_data->ncq_prio_enable = ncq_prio_enable;

	return strlen(buf);
}
static DEVICE_ATTR_RW(sas_ncq_prio_enable);
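
/*
 * Example (illustrative, not part of the driver): enabling NCQ priority
 * for a SATA device from userspace, via a hypothetical H:C:T:L sysfs
 * path. The store callback rejects the write with -EINVAL unless
 * sas_ata_ncq_prio_supported() reports the device capable; kstrtobool()
 * also accepts forms such as "y"/"n" and "on"/"off":
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/scsi_device/16:0:1:0/device/"
 *				"sas_ncq_prio_enable", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("1", f);
 *		return fclose(f) ? 1 : 0;
 *	}
 */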

static struct attribute *mpi3mr_dev_attrs[] = {
	&dev_attr_sas_address.attr,
	&dev_attr_device_handle.attr,
	&dev_attr_persistent_id.attr,
	&dev_attr_sas_ncq_prio_supported.attr,
	&dev_attr_sas_ncq_prio_enable.attr,
	NULL,
};

static const struct attribute_group mpi3mr_dev_attr_group = {
	.attrs = mpi3mr_dev_attrs
};

const struct attribute_group *mpi3mr_dev_groups[] = {
	&mpi3mr_dev_attr_group,
	NULL,
};
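
/*
 * Note: likewise, mpi3mr_dev_groups is hooked up through the same host
 * template so the midlayer attaches these files to each scsi_device of
 * the host. A minimal sketch, under the same assumption as above:
 *
 *	static struct scsi_host_template mpi3mr_driver_template = {
 *		...
 *		.sdev_groups	= mpi3mr_dev_groups,
 *	};
 */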