// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Module Name:
 *  commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

# define AAC_DEBUG_PREAMBLE	KERN_INFO
# define AAC_DEBUG_POSTAMBLE

/**
 * ioctl_send_fib - send a FIB from userspace
 * @dev:	adapter being processed
 * @arg:	arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program.
 */
static int ioctl_send_fib(struct aac_dev *dev, void __user *arg)
{
	struct hw_fib *kfib;
	struct fib *fibptr;
	struct hw_fib *hw_fib = (struct hw_fib *)0;
	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
	unsigned int size, osize;
	int retval;

	if (dev->in_reset) {
		return -EBUSY;
	}
	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL) {
		return -ENOMEM;
	}

	kfib = fibptr->hw_fib_va;
	/*
	 *	First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 *	Since we copy based on the fib header size, make sure that we
	 *	will not overrun the buffer when we copy the memory. Return
	 *	an error if we would.
	 */
	osize = size = le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
					  GFP_KERNEL);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/* Hijack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0,
		       size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	/* Sanity check the second copy */
	if ((osize != le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr))
		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
		retval = -EINVAL;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state to
		 * allow cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (retval) {
			goto cleanup;
		}
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 *	Make sure that the size returned by the adapter (which includes
	 *	the header) is less than or equal to the size of a fib, so we
	 *	don't corrupt application data. Then copy that size to the user
	 *	buffer. (Don't try to add the header information again, since it
	 *	was already included by the adapter.)
	 */

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		dma_free_coherent(&dev->pdev->dev, size, kfib,
				  fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}
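
/*
 * A minimal sketch of how a user-level tool might drive the path above
 * via FSACTL_SENDFIB. The device node name and the header fields filled
 * in are illustrative assumptions, not a supported ABI reference:
 *
 *	int fd = open("/dev/aac0", O_RDWR);	// node name assumed
 *	struct hw_fib fib;
 *	// ... fill in fib.header (Command, Size, SenderSize, ...) ...
 *	if (ioctl(fd, FSACTL_SENDFIB, &fib) == 0) {
 *		// fib now holds the adapter's response, header included
 *	}
 */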

/**
 * open_getadapter_fib - open an adapter fib context
 * @dev:	adapter being processed
 * @arg:	arguments to the open call
 *
 * This routine creates and registers a new AdapterFibContext from which
 * the caller can subsequently retrieve adapter-initiated fibs, and copies
 * the context's unique handle back to the user.
 */
static int open_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head *entry;
		struct aac_fib_context *context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Yes yes, I know this could be an index, but we have a
		 *	better guarantee of uniqueness for the locked loop below.
		 *	Without the aid of a persistent history, this also helps
		 *	reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 *	Initialize the completion used to wait for the next AIF.
		 */
		init_completion(&fibctx->completion);
		fibctx->wait = 0;
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}

/**
 * next_getadapter_fib - get the next fib
 * @dev:	adapter to use
 * @arg:	ioctl argument
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */
static int next_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head *entry;
	unsigned long flags;

	if (copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 *	If there are no fibs to send back, then either wait or return
	 *	-EAGAIN
	 */
return_fib:
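	/*
	 * dev->fib_lock is held whenever we arrive here, both on the
	 * initial fall-through and when re-entered via the goto after a
	 * successful wait below.
	 */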
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (wait_for_completion_interruptible(&fibctx->completion) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}

int aac_close_fib_context(struct aac_dev *dev, struct aac_fib_context *fibctx)
{
	struct fib *fib;

	/*
	 *	First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head *entry;
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 *	Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 *	Invalidate context
	 */
	fibctx->type = 0;
	/*
	 *	Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 * close_getadapter_fib - close down user fib context
 * @dev:	adapter
 * @arg:	ioctl arguments
 *
 * This routine will close down the fibctx passed in from the user.
 */

static int close_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head *entry;

	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */

	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}
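
/*
 * The three routines above form the lifecycle a monitoring tool might
 * use to poll for adapter-initiated FIBs (AIFs). A hedged sketch,
 * assuming an open descriptor "fd" on the aac device and the fib_ioctl
 * layout from aacraid.h:
 *
 *	u32 ctx;
 *	struct hw_fib buf;
 *	ioctl(fd, FSACTL_OPEN_GET_ADAPTER_FIB, &ctx);
 *	struct fib_ioctl f = { .fibctx = ctx, .wait = 1,
 *			       .fib = (char *)&buf };
 *	while (ioctl(fd, FSACTL_GET_NEXT_ADAPTER_FIB, &f) == 0)
 *		;	// process the AIF now in buf
 *	ioctl(fd, FSACTL_CLOSE_GET_ADAPTER_FIB,
 *	      (void *)(uintptr_t)ctx);
 */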

/**
 * check_revision - report the driver version
 * @dev:	adapter
 * @arg:	ioctl arguments
 *
 * This routine returns the driver version.
 * Under Linux, there have been no version incompatibilities, so this is
 * simple!
 */

static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

	response.compat = 1;
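	/*
	 * Pack "major.minor.patch" from aac_driver_version into one u32:
	 * major in bits 31-24, minor in bits 23-16, patch added into the
	 * low word, OR'ed with the constant 0x00000400 (presumably a
	 * legacy marker expected by management tools).
	 */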
	version = (simple_strtol(driver_version,
				&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
	response.version = cpu_to_le32(version);
# ifdef AAC_DRIVER_BUILD
	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
# else
	response.build = cpu_to_le32(9999);
# endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}


/**
 * aac_send_raw_srb - send a raw SRB from userspace
 * @dev:	adapter being processed
 * @arg:	arguments to the send call
 */
static int aac_send_raw_srb(struct aac_dev *dev, void __user *arg)
{
	struct fib *srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct aac_hba_cmd_req *hbacmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	u32 chn;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[HBA_MAX_SG_EMBEDDED];
	void *sg_list[HBA_MAX_SG_EMBEDDED];
	u32 sg_count[HBA_MAX_SG_EMBEDDED];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;
	int is_native_device;
	u64 address;


	if (dev->in_reset) {
		dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 *	Allocate and initialize a Fib then setup a SRB command
	 */
	if (!(srbfib = aac_fib_alloc(dev))) {
		return -ENOMEM;
	}

	memset(sg_list, 0, sizeof(sg_list)); /* cleanup relies on NULL entries */
	if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
		rcode = -EINVAL;
		goto cleanup;
	}

	user_srbcmd = memdup_user(user_srb, fibsize);
	if (IS_ERR(user_srbcmd)) {
		rcode = PTR_ERR(user_srbcmd);
		user_srbcmd = NULL;
		goto cleanup;
	}

	flags = user_srbcmd->flags; /* from user in cpu order */
	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
			user_srbcmd->sg.count));
		rcode = -EINVAL;
		goto cleanup;
	}
	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG"aacraid: SG with no direction specified\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
		(sizeof(struct sgentry64) - sizeof(struct sgentry));
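	/*
	 * Two candidate sizes are computed because userspace may have
	 * built the embedded SG list with either 32-bit sgentry or 64-bit
	 * sgentry64 elements; whichever candidate matches the fibsize the
	 * caller passed in tells us which layout to parse below.
	 */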
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
		  "Raw SRB command calculated fibsize=%lu;%lu "
		  "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
		  "issued fibsize=%d\n",
		  actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
		  sizeof(struct aac_srb), sizeof(struct sgentry),
		  sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}

	chn = user_srbcmd->channel;
	if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
	    dev->hba_map[chn][user_srbcmd->id].devtype ==
	    AAC_DEVTYPE_NATIVE_RAW) {
		is_native_device = 1;
		hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
		/* clearing the full sizeof(*hbacmd) is not necessary */
		memset(hbacmd, 0, 96);

		/* iu_type is a parameter of aac_hba_send */
		switch (data_dir) {
		case DMA_TO_DEVICE:
			hbacmd->byte1 = 2;
			break;
		case DMA_FROM_DEVICE:
		case DMA_BIDIRECTIONAL:
			hbacmd->byte1 = 1;
			break;
		case DMA_NONE:
		default:
			break;
		}
		hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
		hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;

		/*
		 * we fill in reply_qid later in aac_src_deliver_message
		 * we fill in iu_type, request_id later in aac_hba_send
		 * we fill in emb_data_desc_count, data_length later
		 * in sg list build
		 */

		memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));

		address = (u64)srbfib->hw_error_pa;
		hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
		hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
		hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
		hbacmd->emb_data_desc_count =
					cpu_to_le32(user_srbcmd->sg.count);
		srbfib->hbacmd_size = 64 +
			user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);

	} else {
		is_native_device = 0;
		aac_fib_init(srbfib);

		/* raw_srb FIB is not FastResponseCapable */
		srbfib->hw_fib_va->header.XferState &=
			~cpu_to_le32(FastResponseCapable);

		srbcmd = (struct aac_srb *) fib_data(srbfib);

		// Fix up srb for endian and force some values

		srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
		srbcmd->channel	 = cpu_to_le32(user_srbcmd->channel);
		srbcmd->id	 = cpu_to_le32(user_srbcmd->id);
		srbcmd->lun	 = cpu_to_le32(user_srbcmd->lun);
		srbcmd->timeout	 = cpu_to_le32(user_srbcmd->timeout);
		srbcmd->flags	 = cpu_to_le32(flags);
		srbcmd->retry_limit = 0; // Obsolete parameter
		srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
		memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
	}

	byte_count = 0;
	if (is_native_device) {
		struct user_sgmap *usg32 = &user_srbcmd->sg;
		struct user_sgmap64 *usg64 =
			(struct user_sgmap64 *)&user_srbcmd->sg;

		for (i = 0; i < usg32->count; i++) {
			void *p;
			u64 addr;

			sg_count[i] = (actual_fibsize64 == fibsize) ?
				usg64->sg[i].count : usg32->sg[i].count;
			if (sg_count[i] >
				(dev->scsi_host_ptr->max_sectors << 9)) {
				pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
					i, sg_count[i],
					dev->scsi_host_ptr->max_sectors << 9);
				rcode = -EINVAL;
				goto cleanup;
			}

			p = kmalloc(sg_count[i], GFP_KERNEL);
			if (!p) {
				rcode = -ENOMEM;
				goto cleanup;
			}

			if (actual_fibsize64 == fibsize) {
				addr = (u64)usg64->sg[i].addr[0];
				addr += ((u64)usg64->sg[i].addr[1]) << 32;
			} else {
				addr = (u64)usg32->sg[i].addr;
			}

			sg_user[i] = (void __user *)(uintptr_t)addr;
			sg_list[i] = p; // save so we can clean up later
			sg_indx = i;

			if (flags & SRB_DataOut) {
				if (copy_from_user(p, sg_user[i],
						   sg_count[i])) {
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			addr = dma_map_single(&dev->pdev->dev, p, sg_count[i],
					      data_dir);
			hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
			hbacmd->sge[i].addr_lo = cpu_to_le32(
						(u32)(addr & 0xffffffff));
			hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
			hbacmd->sge[i].flags = 0;
			byte_count += sg_count[i];
		}

		if (usg32->count > 0)	/* embedded sglist */
			hbacmd->sge[usg32->count-1].flags =
				cpu_to_le32(0x40000000);
		hbacmd->data_length = cpu_to_le32(byte_count);

		status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
					NULL, NULL);

	} else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64 *upsg = (struct user_sgmap64 *)&user_srbcmd->sg;
		struct sgmap64 *psg = (struct sgmap64 *)&srbcmd->sg;

		/*
		 *	This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
						sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = dma_map_single(&dev->pdev->dev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			struct user_sgmap *usg;
			usg = kmemdup(upsg,
				      actual_fibsize - sizeof(struct aac_srb)
				      + sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			actual_fibsize = actual_fibsize64;

			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					kfree(usg);
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, usg->count));
					kfree(usg);
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
						sg_count[i])) {
						kfree(usg);
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = dma_map_single(&dev->pdev->dev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
			kfree(usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand64, srbfib,
				actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	} else {
		struct user_sgmap *upsg = &user_srbcmd->sg;
		struct sgmap *psg = &srbcmd->sg;

		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64 *usg = (struct user_sgmap64 *)upsg;
			for (i = 0; i < upsg->count; i++) {
				uintptr_t addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
						sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = dma_map_single(&dev->pdev->dev, p,
						      usg->sg[i].count,
						      data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
						sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = dma_map_single(&dev->pdev->dev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand, srbfib,
				actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	}

	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for (i = 0; i <= sg_indx; i++) {
			if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;

			}
		}
	}

	user_reply = arg + fibsize;
	if (is_native_device) {
		struct aac_hba_resp *err =
			&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
		struct aac_srb_reply reply;

		memset(&reply, 0, sizeof(reply));
		reply.status = ST_OK;
		if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
			/* fast response */
			reply.srb_status = SRB_STATUS_SUCCESS;
			reply.scsi_status = 0;
			reply.data_xfer_length = byte_count;
			reply.sense_data_size = 0;
			memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
		} else {
			reply.srb_status = err->service_response;
			reply.scsi_status = err->status;
			reply.data_xfer_length = byte_count -
				le32_to_cpu(err->residual_count);
			reply.sense_data_size = err->sense_response_data_len;
			memcpy(reply.sense_data, err->sense_response_buf,
				AAC_SENSE_BUFFERSIZE);
		}
		if (copy_to_user(user_reply, &reply,
			sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	} else {
		struct aac_srb_reply *reply;

		reply = (struct aac_srb_reply *) fib_data(srbfib);
		if (copy_to_user(user_reply, reply,
			sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	}

cleanup:
	kfree(user_srbcmd);
	if (rcode != -ERESTARTSYS) {
		for (i = 0; i <= sg_indx; i++)
			kfree(sg_list[i]);
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}

	return rcode;
}

struct aac_pci_info {
	u32 bus;
	u32 slot;
};


static int aac_get_pci_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}
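
/*
 * A hedged example of retrieving the adapter's PCI location from a
 * user-level program (the device node name is an assumption):
 *
 *	struct aac_pci_info info;
 *	int fd = open("/dev/aac0", O_RDWR);
 *	if (ioctl(fd, FSACTL_GET_PCI_INFO, &info) == 0)
 *		printf("bus %u slot %u\n", info.bus, info.slot);
 */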

static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_hba_info hbainfo;

	memset(&hbainfo, 0, sizeof(hbainfo));
	hbainfo.adapter_number		= (u8) dev->id;
	hbainfo.system_io_bus_number	= dev->pdev->bus->number;
	hbainfo.device_number		= (dev->pdev->devfn >> 3);
	hbainfo.function_number		= (dev->pdev->devfn & 0x0007);
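	/*
	 * The two lines above are open-coded equivalents of PCI_SLOT()
	 * and PCI_FUNC(): devfn packs the slot in bits 7-3 and the
	 * function in bits 2-0.
	 */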

	hbainfo.vendor_id		= dev->pdev->vendor;
	hbainfo.device_id		= dev->pdev->device;
	hbainfo.sub_vendor_id		= dev->pdev->subsystem_vendor;
	hbainfo.sub_system_id		= dev->pdev->subsystem_device;

	if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
		return -EFAULT;
	}

	return 0;
}

struct aac_reset_iop {
	u8	reset_type;
};

static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
{
	struct aac_reset_iop reset;
	int retval;

	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
		return -EFAULT;

	dev->adapter_shutdown = 1;

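	/*
	 * ioctl_mutex is dropped across the (potentially long-running)
	 * reset and re-taken below; concurrent ioctl callers will see
	 * adapter_shutdown set and fail with -EACCES rather than block.
	 */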
	mutex_unlock(&dev->ioctl_mutex);
	retval = aac_reset_adapter(dev, 0, reset.reset_type);
	mutex_lock(&dev->ioctl_mutex);

	return retval;
}

int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg)
{
	int status;

	mutex_lock(&dev->ioctl_mutex);

	if (dev->adapter_shutdown) {
		status = -EACCES;
		goto cleanup;
	}

	/*
	 *	HBA gets first crack
	 */

	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		goto cleanup;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	case FSACTL_GET_HBA_INFO:
		status = aac_get_hba_info(dev, arg);
		break;
	case FSACTL_RESET_IOP:
		status = aac_send_reset_adapter(dev, arg);
		break;

	default:
		status = -ENOTTY;
		break;
	}

cleanup:
	mutex_unlock(&dev->ioctl_mutex);

	return status;
}