Excerpts from the QLogic InfiniBand user-space file operations code (drivers/infiniband/hw/qib/qib_file_ops.c), grouped by function.

/*
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 *
 * - Redistributions of source code must retain the above
 * - Redistributions in binary form must reproduce the above
 */

/*
 * This is really, really weird shit - write() and writev() here
 */
/* excerpts from qib_get_base_info(): */
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;

	subctxt_cnt = rcd->subctxt_cnt;

	sz -= 7 * sizeof(u64);
	ret = -EINVAL;
	ret = -ENOMEM;

	ret = dd->f_get_base_info(rcd, kinfo);

	kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
	kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;

	kinfo->spi_rcv_egrbuftotlen =
		rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		rcd->rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
	kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;

	kinfo->spi_nctxts = dd->cfgctxts;
	kinfo->spi_unit = dd->unit;
	kinfo->spi_port = ppd->port;
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * ... since the chips with non-zero rhf_offset don't normally ...
	 */
	kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
	kinfo->spi_rhf_offset = dd->rhf_offset;
	kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
	/* setup per-unit (not port) status area for user programs */
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(char *) ppd->statusp -
		(char *) dd->pioavailregs_dma;
	kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;

	kinfo->spi_piocnt = rcd->piocnt;
	kinfo->spi_piobufbase = (u64) rcd->piobufs;
	kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);

	kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
		(rcd->piocnt % subctxt_cnt);
	kinfo->spi_piobufbase = (u64) rcd->piobufs +
		dd->palign *
		(rcd->piocnt - kinfo->spi_piocnt);

	unsigned slave = subctxt_fp(fp) - 1;

	kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
	kinfo->spi_piobufbase = (u64) rcd->piobufs +
		dd->palign * kinfo->spi_piocnt * slave;

	kinfo->spi_sendbuf_status =
		cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);

	kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
	kinfo->spi_subctxt_rcvegrbuf =
		cvt_kvaddr(rcd->subctxt_rcvegrbuf);
	kinfo->spi_subctxt_rcvhdr_base =
		cvt_kvaddr(rcd->subctxt_rcvhdr_base);

	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
		dd->palign;
	kinfo->spi_pioalign = dd->palign;
	kinfo->spi_qpair = QIB_KD_QP;

	kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
	kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
	kinfo->spi_ctxt = rcd->ctxt;
	kinfo->spi_subctxt = subctxt_fp(fp);
	kinfo->spi_sw_version = QIB_KERN_SWVERSION;
	kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
	kinfo->spi_hw_version = dd->revision;

	kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;

	ret = -EFAULT;
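For context, a minimal sketch (not this driver's code) of the pattern qib_get_base_info() follows: fill a kernel-side parameter struct, trim the copy to the size a possibly older userspace advertises, and copy it out. The names my_base_info, my_fill_info, and the struct layout are hypothetical.

	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>

	struct my_base_info {		/* hypothetical exported layout */
		u64 version;
		u64 rcvhdr_cnt;
	};

	static void my_fill_info(struct my_base_info *ki)
	{
		ki->version = 1;	/* stand-in for the real field setup */
		ki->rcvhdr_cnt = 0;
	}

	static int my_get_base_info(void __user *ubase, size_t ubase_size)
	{
		struct my_base_info *kinfo;
		size_t sz = sizeof(*kinfo);
		int ret = 0;

		/* An older userspace may hand in a shorter buffer; never overrun it. */
		if (ubase_size < sz)
			sz = ubase_size;

		kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
		if (!kinfo)
			return -ENOMEM;

		my_fill_info(kinfo);

		if (copy_to_user(ubase, kinfo, sz))
			ret = -EFAULT;

		kfree(kinfo);
		return ret;
	}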
/*
 * qib_tid_update - update a context TID
 */
/* excerpts from qib_tid_update(): */
	struct qib_devdata *dd = rcd->dd;

	if (!dd->pageshadow) {
		ret = -ENOMEM;

	cnt = ti->tidcnt;
		ret = -EFAULT;

	ctxttid = rcd->ctxt * dd->rcvtidcnt;
	if (!rcd->subctxt_cnt) {
		tidcnt = dd->rcvtidcnt;
		tid = rcd->tidcursor;

		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
			(dd->rcvtidcnt % rcd->subctxt_cnt);
		tidoff = dd->rcvtidcnt - tidcnt;

		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
		tidoff = tidcnt * (subctxt - 1);

		qib_devinfo(dd->pcidev,

	pagep = (struct page **) rcd->tid_pg_list;
	tidlist = (u16 *) &pagep[dd->rcvtidcnt];

	tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
				   dd->rcvtidbase +

	vaddr = ti->tidvaddr;
		ret = -EFAULT;

	/*
	 * if (ret == -EBUSY)
	 */
			dd->pcidev,
			(void *) vaddr, cnt, -ret);

	for (; ntids--; tid++) {
		if (!dd->pageshadow[ctxttid + tid])

		i--; /* last tidlist[i] not filled in */
		ret = -ENOMEM;

		ret = qib_map_page(dd->pcidev, pagep[i], &daddr);

		dd->pageshadow[ctxttid + tid] = pagep[i];
		dd->physshadow[ctxttid + tid] = daddr;

		physaddr = dd->physshadow[ctxttid + tid];

		dd->f_put_tid(dd, &tidbase[tid],

		if (dd->pageshadow[ctxttid + tid]) {
			phys = dd->physshadow[ctxttid + tid];
			dd->physshadow[ctxttid + tid] = dd->tidinvalid;
			dd->f_put_tid(dd, &tidbase[tid],
				      dd->tidinvalid);
			dma_unmap_page(&dd->pcidev->dev, phys,
			dd->pageshadow[ctxttid + tid] = NULL;

		(unsigned long) ti->tidlist,
		ret = -EFAULT;
	if (copy_to_user(u64_to_user_ptr(ti->tidmap),
		ret = -EFAULT;

	if (!rcd->subctxt_cnt)
		rcd->tidcursor = tid;
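The TID-update path above pins user pages and DMA-maps them one page at a time, recording both the struct page and the dma_addr_t in shadow arrays so they can be unmapped on free. A rough modern-API equivalent of that per-page step might look like the sketch below; the driver itself predates pin_user_pages and uses its own qib_map_page wrapper, and my_pin_and_map is hypothetical.

	#include <linux/mm.h>
	#include <linux/dma-mapping.h>

	static int my_pin_and_map(struct device *dev, unsigned long vaddr,
				  struct page **pagep, dma_addr_t *daddr)
	{
		int ret;

		/* Pin one writable user page so the device can DMA into it. */
		ret = pin_user_pages_fast(vaddr, 1, FOLL_WRITE, pagep);
		if (ret != 1)
			return ret < 0 ? ret : -EFAULT;

		*daddr = dma_map_page(dev, *pagep, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *daddr)) {
			unpin_user_page(*pagep);
			return -ENOMEM;
		}
		return 0;
	}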
/*
 * qib_tid_free - free a context TID
 */
/* excerpts from qib_tid_free(): */
	struct qib_devdata *dd = rcd->dd;

	if (!dd->pageshadow) {
		ret = -ENOMEM;

	if (copy_from_user(tidmap, u64_to_user_ptr(ti->tidmap),
		ret = -EFAULT;

	ctxttid = rcd->ctxt * dd->rcvtidcnt;
	if (!rcd->subctxt_cnt)
		tidcnt = dd->rcvtidcnt;

		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
			(dd->rcvtidcnt % rcd->subctxt_cnt);
		ctxttid += dd->rcvtidcnt - tidcnt;

		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
		ctxttid += tidcnt * (subctxt - 1);

	tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
				   dd->rcvtidbase +

		if (dd->pageshadow[ctxttid + tid]) {
			p = dd->pageshadow[ctxttid + tid];
			dd->pageshadow[ctxttid + tid] = NULL;
			phys = dd->physshadow[ctxttid + tid];
			dd->physshadow[ctxttid + tid] = dd->tidinvalid;
			dd->f_put_tid(dd, &tidbase[tid],
				      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
			dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
/*
 * qib_set_part_key - set a partition key
 *
 * ... mechanism to de-allocate a pkey at this time, we may eventually need to ...
 */
/* excerpts from qib_set_part_key(): */
	struct qib_pportdata *ppd = rcd->ppd;
	int i, pidx = -1;

		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		if (!rcd->pkeys[i] && pidx == -1)
		if (rcd->pkeys[i] == key)
			return -EEXIST;
	if (pidx == -1)
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
		if (ppd->pkeys[i] == key) {
			atomic_t *pkrefs = &ppd->pkeyrefs[i];

			rcd->pkeys[pidx] = key;

		if ((ppd->pkeys[i] & 0x7FFF) == lkey)

			return -EEXIST;
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			rcd->pkeys[pidx] = key;
			ppd->pkeys[i] = key;
			(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

	return -EBUSY;
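The final loop above claims a slot in the port-wide pkey table with atomic_inc_return(): of all racing contexts that see an empty slot, only the one that observes the 0 -> 1 refcount transition wins it. A stripped-down sketch of that claim pattern, with my_claim_slot and its parameters hypothetical and the race-loser backing off explicitly:

	#include <linux/types.h>
	#include <linux/atomic.h>
	#include <linux/errno.h>

	static int my_claim_slot(u16 *keys, atomic_t *refs, int nslots, u16 key)
	{
		int i;

		for (i = 0; i < nslots; i++) {
			if (keys[i])
				continue;
			/* Only the caller that takes the refcount 0 -> 1 wins. */
			if (atomic_inc_return(&refs[i]) == 1) {
				keys[i] = key;
				return i;
			}
			atomic_dec(&refs[i]);	/* lost the race; undo, move on */
		}
		return -EBUSY;			/* table full */
	}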
/*
 * qib_manage_rcvq - manage a context's receive queue
 *
 * ... overflow conditions. start_stop==1 re-enables, to be used to
 * re-init the software copy of the head register ...
 */
/* excerpts from qib_manage_rcvq(): */
	struct qib_devdata *dd = rcd->dd;

	/*
	 * On enable, force in-memory copy of the tail register to
	 * ... whether or not the chip has yet updated the in-memory
	 * ...
	 */
	if (rcd->rcvhdrtail_kvaddr)

	dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
/* excerpts from qib_clean_part_key(): */
	struct qib_pportdata *ppd = rcd->ppd;

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		if (!rcd->pkeys[i])
		for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
			if ((ppd->pkeys[j] & 0x7fff) !=
			    (rcd->pkeys[i] & 0x7fff))
			if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
				ppd->pkeys[j] = 0;
		rcd->pkeys[i] = 0;

	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
/* excerpts from qib_mmap_mem(): */
	struct qib_devdata *dd = rcd->dd;

	if ((vma->vm_end - vma->vm_start) > len) {
		qib_devinfo(dd->pcidev,
			vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;

	if (vma->vm_flags & VM_WRITE) {
		qib_devinfo(dd->pcidev,
		ret = -EPERM;

	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);

		qib_devinfo(dd->pcidev,
			what, rcd->ctxt, pfn, len, ret);
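qib_mmap_mem() validates the requested length and write permission, then hands the physically contiguous kernel buffer to remap_pfn_range(). A skeleton of that kind of mmap handler, assuming a contiguous buffer my_buf of MY_BUF_SIZE bytes allocated elsewhere (both names hypothetical):

	#include <linux/mm.h>
	#include <linux/io.h>

	#define MY_BUF_SIZE PAGE_SIZE
	static void *my_buf;	/* assume allocated contiguously elsewhere */

	static int my_mmap(struct file *fp, struct vm_area_struct *vma)
	{
		size_t len = vma->vm_end - vma->vm_start;
		unsigned long pfn = virt_to_phys(my_buf) >> PAGE_SHIFT;

		if (len > MY_BUF_SIZE)
			return -EINVAL;
		if (vma->vm_flags & VM_WRITE)
			return -EPERM;	/* read-only mapping, as for pioavail */

		return remap_pfn_range(vma, vma->vm_start, pfn, len,
				       vma->vm_page_prot);
	}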
/* excerpts from mmap_ureg(): */
	sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
	if ((vma->vm_end - vma->vm_start) > sz) {
		qib_devinfo(dd->pcidev,
			vma->vm_end - vma->vm_start);
		ret = -EFAULT;

		phys = dd->physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
/* excerpts from mmap_piobufs(): */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
		qib_devinfo(dd->pcidev,
			vma->vm_end - vma->vm_start);
		ret = -EINVAL;

	phys = dd->physaddr + piobufs;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (!dd->wc_cookie)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
/* excerpts from mmap_rcvegrbufs(): */
	struct qib_devdata *dd = rcd->dd;

	size = rcd->rcvegrbuf_size;
	total_size = rcd->rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		qib_devinfo(dd->pcidev,
			vma->vm_end - vma->vm_start,
		ret = -EINVAL;

	if (vma->vm_flags & VM_WRITE) {
		qib_devinfo(dd->pcidev,
			vma->vm_flags);
		ret = -EPERM;

	start = vma->vm_start;

	for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
		pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
			vma->vm_page_prot);
/*
 * qib_file_vma_fault - handle a VMA page fault.
 */
/* excerpts from qib_file_vma_fault(): */
	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));

	vmf->page = page;
/* excerpts from mmap_kvaddr(): */
	struct qib_devdata *dd = rcd->dd;

	subctxt_cnt = rcd->subctxt_cnt;
	size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;

	/*
	 * ... rcvegrbufs mmapped - as an array for all the processes, ...
	 */
	if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
		addr = rcd->subctxt_uregbase;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
		addr = rcd->subctxt_rcvhdr_base;
		size = rcd->rcvhdrq_size * subctxt_cnt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
		addr = rcd->subctxt_rcvegrbuf;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
		addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
					rcd->rcvhdrq_size * subctxt)) {
		addr = rcd->subctxt_rcvhdr_base +
			rcd->rcvhdrq_size * subctxt;
		size = rcd->rcvhdrq_size;
	} else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
		addr = rcd->user_event_mask;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
		addr = rcd->subctxt_rcvegrbuf + size * subctxt;
		/* rcvegrbufs are read-only on the slave */
		if (vma->vm_flags & VM_WRITE) {
			qib_devinfo(dd->pcidev,
				vma->vm_flags);
			ret = -EPERM;

	len = vma->vm_end - vma->vm_start;
		ret = -EINVAL;

	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
	vma->vm_ops = &qib_file_vm_ops;
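mmap_kvaddr() doesn't map the vmalloc'd shared buffers eagerly; it stashes the kernel virtual address in vm_pgoff, installs qib_file_vm_ops, and lets the fault handler resolve each page with vmalloc_to_page(), as in qib_file_vma_fault() above. A self-contained sketch of that pairing (the my_* names are hypothetical; the buffer is assumed to come from vmalloc_user() so it is page-aligned and zeroed):

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static vm_fault_t my_vma_fault(struct vm_fault *vmf)
	{
		struct page *page;

		/* vm_pgoff was set to (kernel vaddr >> PAGE_SHIFT) at mmap time. */
		page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
		if (!page)
			return VM_FAULT_SIGBUS;

		get_page(page);		/* hold a reference while mapped */
		vmf->page = page;
		return 0;
	}

	static const struct vm_operations_struct my_vm_ops = {
		.fault = my_vma_fault,
	};

	static int my_mmap_kvaddr(struct vm_area_struct *vma, void *kvaddr)
	{
		vma->vm_pgoff = (unsigned long)kvaddr >> PAGE_SHIFT;
		vma->vm_ops = &my_vm_ops;
		return 0;
	}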
/*
 * qib_mmapf - mmap various structures into user space
 *
 * ... for the rcvhdr queue, egr buffers, and the per-context user regs and pio ...
 */
/* excerpts from qib_mmapf(): */
	if (!rcd || !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;

	dd = rcd->dd;

	/*
	 * ... and per-context user registers into the user process. The address
	 * ...
	 * For non-shared or master ctxts, this is a physical address.
	 * ...
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

		ret = -EINVAL;

	ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
	if (!rcd->subctxt_cnt) {
		piocnt = rcd->piocnt;
		piobufs = rcd->piobufs;

		piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
			(rcd->piocnt % rcd->subctxt_cnt);
		piobufs = rcd->piobufs +
			dd->palign * (rcd->piocnt - piocnt);

		unsigned slave = subctxt_fp(fp) - 1;

		piocnt = rcd->piocnt / rcd->subctxt_cnt;
		piobufs = rcd->piobufs + dd->palign * piocnt * slave;

	else if (pgaddr == dd->pioavailregs_phys)
		/* in-memory copy of pioavail registers */
			(void *) dd->pioavailregs_dma, 0,
	else if (pgaddr == rcd->rcvegr_phys)
	else if (pgaddr == (u64) rcd->rcvhdrq_phys)
		ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
				   rcd->rcvhdrq, 1, "rcvhdrq");
	else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
			rcd->rcvhdrtail_kvaddr, 0,

		ret = -EINVAL;

	vma->vm_private_data = NULL;

		qib_devinfo(dd->pcidev,
			-ret, (unsigned long long)pgaddr,
			vma->vm_end - vma->vm_start);
/* excerpts from qib_poll_urgent(): */
	struct qib_devdata *dd = rcd->dd;

	poll_wait(fp, &rcd->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (rcd->urgent != rcd->urgent_poll) {
		rcd->urgent_poll = rcd->urgent;

		set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);

	spin_unlock_irq(&dd->uctxt_lock);
/* excerpts from qib_poll_next(): */
	struct qib_devdata *dd = rcd->dd;

	poll_wait(fp, &rcd->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (dd->f_hdrqempty(rcd)) {
		set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);

	spin_unlock_irq(&dd->uctxt_lock);
/* excerpts from qib_poll(): */
	else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
	else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
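Both poll variants above follow the canonical kernel pattern: call poll_wait() first to register on the context's wait queue, then test the condition under the lock, and if it isn't met, arm a flag so the interrupt path knows to wake the queue. A generic sketch of that shape (struct my_ctxt and its fields are hypothetical):

	#include <linux/poll.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	struct my_ctxt {
		wait_queue_head_t wait;
		spinlock_t lock;
		bool data_ready;
		bool wakeup_armed;	/* tells the IRQ handler to wake_up() */
	};

	static __poll_t my_poll(struct file *fp, struct poll_table_struct *pt)
	{
		struct my_ctxt *ctxt = fp->private_data;
		__poll_t mask = 0;

		poll_wait(fp, &ctxt->wait, pt);	/* register before testing */

		spin_lock_irq(&ctxt->lock);
		if (ctxt->data_ready)
			mask = EPOLLIN | EPOLLRDNORM;
		else
			ctxt->wakeup_armed = true;
		spin_unlock_irq(&ctxt->lock);

		return mask;
	}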
/* excerpts from assign_ctxt_affinity(): */
	struct qib_filedata *fd = fp->private_data;
	const unsigned int weight = current->nr_cpus_allowed;
	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);

		fd->rec_cpu_num = local_cpu;

		current->pid);

	fd->rec_cpu_num = cpu;
/* in qib_compatible_subctxts(): */
	/* this code is written long-hand for clarity */
/* excerpts from init_subctxts(): */
	if (uinfo->spu_subctxt_cnt <= 0)
	num_subctxts = uinfo->spu_subctxt_cnt;

	if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
				     uinfo->spu_userversion & 0xffff)) {
		qib_devinfo(dd->pcidev,
			(int) (uinfo->spu_userversion >> 16),
			(int) (uinfo->spu_userversion & 0xffff),

		ret = -EINVAL;

	rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
	if (!rcd->subctxt_uregbase) {
		ret = -ENOMEM;

	/* Note: rcd->rcvhdrq_size isn't initialized yet. */
	size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
	rcd->subctxt_rcvhdr_base = vmalloc_user(size);
	if (!rcd->subctxt_rcvhdr_base) {
		ret = -ENOMEM;

	rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
					      rcd->rcvegrbuf_size *
	if (!rcd->subctxt_rcvegrbuf) {
		ret = -ENOMEM;

	rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
	rcd->subctxt_id = uinfo->spu_subctxt_id;
	rcd->active_slaves = 1;
	rcd->redirect_seq_cnt = 1;
	set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);

	vfree(rcd->subctxt_rcvhdr_base);
	vfree(rcd->subctxt_uregbase);
	rcd->subctxt_uregbase = NULL;
/* excerpts from setup_ctxt(): */
	struct qib_filedata *fd = fp->private_data;
	struct qib_devdata *dd = ppd->dd;

	numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
		cpu_to_node(fd->rec_cpu_num) :
		numa_node_id()) : dd->assigned_node_id;

	ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
		       dd->rcvtidcnt * sizeof(struct page **),

		ret = -ENOMEM;

	rcd->userversion = uinfo->spu_userversion;

	rcd->tid_pg_list = ptmp;
	rcd->pid = current->pid;
	init_waitqueue_head(&dd->rcd[ctxt]->wait);
	get_task_comm(rcd->comm, current);

	dd->freectxts--;

	if (fd->rec_cpu_num != -1)
		__clear_bit(fd->rec_cpu_num, qib_cpulist);

	dd->rcd[ctxt] = NULL;
/* in usable(): */
	struct qib_devdata *dd = ppd->dd;

	return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
		(ppd->lflags & QIBL_LINKACTIVE);
/* excerpts from choose_port_ctxt(): */
	if (!usable(dd->pport + port - 1)) {
		ret = -ENETDOWN;

	ppd = dd->pport + port - 1;

	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];

	if (ctxt == dd->cfgctxts) {
		ret = -EBUSY;

		u32 pidx = ctxt % dd->num_pports;

		if (usable(dd->pport + pidx))
			ppd = dd->pport + pidx;

		for (pidx = 0; pidx < dd->num_pports && !ppd;
			if (usable(dd->pport + pidx))
				ppd = dd->pport + pidx;

	ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
/* excerpts from find_free_ctxt(): */
	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
		ret = -ENODEV;

		ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
/* excerpts from get_a_ctxt(): */
	u32 port = uinfo->spu_port, ctxt;

	ret = -ENXIO;

	ret = -ENETDOWN;

		if (port && port <= dd->num_pports &&
		    usable(dd->pport + port - 1))

			for (i = 0; i < dd->num_pports; i++)
				if (usable(dd->pport + i))

		for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
			if (dd->rcd[ctxt])

	if (ret == -EBUSY)

	ret = dusable ? -EBUSY : -ENETDOWN;
/* excerpts from find_shared_ctxt(): */
	if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))

	for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
		struct qib_ctxtdata *rcd = dd->rcd[i];

		if (!rcd || !rcd->cnt)
		if (rcd->subctxt_id != uinfo->spu_subctxt_id)
		if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
		    rcd->userversion != uinfo->spu_userversion ||
		    rcd->cnt >= rcd->subctxt_cnt) {
			ret = -EINVAL;

		subctxt_fp(fp) = rcd->cnt++;
		rcd->subpid[subctxt_fp(fp)] = current->pid;
		rcd->active_slaves |= 1 << subctxt_fp(fp);
/* in qib_open(): */
	fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
	if (fp->private_data) /* no cpu affinity by default */
		((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
	return fp->private_data ? 0 : -ENOMEM;
/* excerpts from find_hca(): */
	*unit = -1;

	ret = -ENXIO;

	ret = -ENETDOWN;

	if (pcibus_to_node(dd->pcidev->bus) < 0) {
		ret = -EINVAL;

		pcibus_to_node(dd->pcidev->bus)) {
/* excerpts from do_qib_user_sdma_queue_create(): */
	struct qib_filedata *fd = fp->private_data;
	struct qib_ctxtdata *rcd = fd->rcd;
	struct qib_devdata *dd = rcd->dd;

	if (dd->flags & QIB_HAS_SEND_DMA) {
		fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
						    dd->unit,
						    rcd->ctxt,
						    fd->subctxt);
		if (!fd->pq)
			return -ENOMEM;
/* excerpts from qib_assign_ctxt(): */
	ret = -EINVAL;

	swmajor = uinfo->spu_userversion >> 16;
		ret = -ENODEV;

	swminor = uinfo->spu_userversion & 0xffff;

	if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
		alg = uinfo->spu_port_alg;

		uinfo->spu_subctxt_cnt) {

			assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);

	i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
		ret = find_free_ctxt(i_minor - 1, fp, uinfo);

		const unsigned int cpu = cpumask_first(current->cpus_ptr);
		const unsigned int weight = current->nr_cpus_allowed;
/* excerpts from qib_do_user_init(): */
	ret = wait_event_interruptible(rcd->wait,
		!test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));

	dd = rcd->dd;

	uctxt = rcd->ctxt - dd->first_user_ctxt;
	if (uctxt < dd->ctxts_extrabuf) {
		rcd->piocnt = dd->pbufsctxt + 1;
		rcd->pio_base = rcd->piocnt * uctxt;

		rcd->piocnt = dd->pbufsctxt;
		rcd->pio_base = rcd->piocnt * uctxt +
			dd->ctxts_extrabuf;

	if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
		if (rcd->pio_base >= dd->piobcnt2k) {
				dd->unit, rcd->ctxt);
			ret = -ENOBUFS;

		rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
			rcd->ctxt, rcd->piocnt);

	rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,

	/*
	 * ... have the in-memory copy out of date at startup for this range of
	 * buffers, when a context gets re-used. Do after the chg_pioavail ...
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);

	/*
	 * ... array for time being. If rcd->ctxt > chip-supported, ...
	 */
	rcd->tidcursor = 0; /* start at beginning after open */

	rcd->urgent = 0;
	rcd->urgent_poll = 0;

	/*
	 * ... explicitly set the in-memory tail copy to 0 beforehand, so we ...
	 */
	if (rcd->rcvhdrtail_kvaddr)

	dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
		      rcd->ctxt);

	if (rcd->subctxt_cnt) {
		clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
		wake_up(&rcd->wait);

	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
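The master/slave startup handshake above is a plain wait_event/wake_up pairing around an atomic flag bit: slaves sleep until the master finishes initializing the shared context, then the master clears QIB_CTXT_MASTER_UNINIT and wakes them. Reduced to its skeleton (MY_MASTER_UNINIT and struct my_ctxt are hypothetical):

	#include <linux/wait.h>
	#include <linux/bitops.h>

	#define MY_MASTER_UNINIT 0

	struct my_ctxt {
		wait_queue_head_t wait;
		unsigned long flag;
	};

	/* Slave: sleep until the master is done; -ERESTARTSYS on a signal. */
	static int my_slave_wait(struct my_ctxt *ctxt)
	{
		return wait_event_interruptible(ctxt->wait,
				!test_bit(MY_MASTER_UNINIT, &ctxt->flag));
	}

	/* Master: publish readiness, then wake any sleeping slaves. */
	static void my_master_ready(struct my_ctxt *ctxt)
	{
		clear_bit(MY_MASTER_UNINIT, &ctxt->flag);
		wake_up(&ctxt->wait);	/* waiters re-check test_bit() */
	}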
/*
 * unlock_expected_tids - unlock any expected TID entries the context still had
 */
/* excerpts from unlock_expected_tids(): */
	struct qib_devdata *dd = rcd->dd;
	int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
	int i, maxtid = ctxt_tidbase + dd->rcvtidcnt;

		struct page *p = dd->pageshadow[i];

		phys = dd->physshadow[i];
		dd->physshadow[i] = dd->tidinvalid;
		dd->pageshadow[i] = NULL;
		dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
/* excerpts from qib_close(): */
	fd = fp->private_data;
	fp->private_data = NULL;
	rcd = fd->rcd;

	dd = rcd->dd;

	if (fd->pq) {
		qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
		qib_user_sdma_queue_destroy(fd->pq);

	if (fd->rec_cpu_num != -1)
		__clear_bit(fd->rec_cpu_num, qib_cpulist);

	if (--rcd->cnt) {
		rcd->active_slaves &= ~(1 << fd->subctxt);
		rcd->subpid[fd->subctxt] = 0;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	ctxt = rcd->ctxt;
	dd->rcd[ctxt] = NULL;
	rcd->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (rcd->rcvwait_to || rcd->piowait_to ||
	    rcd->rcvnowait || rcd->pionowait) {
		rcd->rcvwait_to = 0;
		rcd->piowait_to = 0;
		rcd->rcvnowait = 0;
		rcd->pionowait = 0;
	if (rcd->flag)
		rcd->flag = 0;

	if (dd->kregbase) {
		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |

		qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
		qib_chg_pioavailkernel(dd, rcd->pio_base,
				       rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);

		dd->f_clear_tids(dd, rcd);

		if (dd->pageshadow)

	qib_stats.sps_ctxts--;
	dd->freectxts++;
/* excerpts from qib_ctxt_info(): */
	fd = fp->private_data;

	info.unit = rcd->dd->unit;
	info.port = rcd->ppd->port;
	info.ctxt = rcd->ctxt;

	info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
	info.num_subctxts = rcd->subctxt_cnt;
	info.rec_cpu = fd->rec_cpu_num;

	ret = -EFAULT;
/* in qib_sdma_get_inflight(): */
		return -EFAULT;
/* in qib_sdma_get_complete(): */
		return -EINVAL;
		return -EFAULT;
/* excerpts from disarm_req_delay(): */
	if (!usable(rcd->ppd)) {

		if (rcd->user_event_mask) {
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
					&rcd->user_event_mask[i]);

		for (i = 0; !usable(rcd->ppd) && i < 300; i++)

		ret = -ENETDOWN;
/* excerpts from qib_set_uevent_bits(): */
	spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
	for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
		rcd = ppd->dd->rcd[ctxt];

		if (rcd->user_event_mask) {
			set_bit(evtbit, &rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(evtbit, &rcd->user_event_mask[i]);

	spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
/* in qib_user_event_ack(): */
			clear_bit(i, &rcd->user_event_mask[subctxt]);
/* excerpts from qib_write(): */
		task_tgid_vnr(current), current->comm);
	return -EACCES;

	ret = -EINVAL;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;

		src = &ucmd->cmd.user_info;
		src = &ucmd->cmd.recv_ctrl;
		src = &ucmd->cmd.ctxt_info;
		src = &ucmd->cmd.tid_info;
		src = &ucmd->cmd.part_key;
		src = &ucmd->cmd.poll_type;
		src = &ucmd->cmd.armlaunch_ctrl;
		src = &ucmd->cmd.sdma_inflight;
		src = &ucmd->cmd.sdma_complete;
		src = &ucmd->cmd.event_mask;

		ret = -EINVAL;

	if ((count - consumed) < copy) {
		ret = -EINVAL;

		ret = -EFAULT;

		ret = -EINVAL;

		ret = -EINVAL;

		qib_force_pio_avail_update(rcd->dd);

		rcd->poll_type = cmd.cmd.poll_type;

		rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);

		ret = qib_sdma_get_complete(rcd->ppd,
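qib_write() implements a small command ABI over write(): read a fixed-size type field first, pick the matching member of a command union, check that the caller supplied enough bytes, then copy just that member. A compact sketch of the same two-stage copy (struct my_cmd and the command values are hypothetical):

	#include <linux/types.h>
	#include <linux/fs.h>
	#include <linux/uaccess.h>

	struct my_cmd {
		u32 type;
		union {
			u16 part_key;
			u64 event_mask;
		} cmd;
	};

	static ssize_t my_write(struct file *fp, const char __user *data,
				size_t count, loff_t *off)
	{
		const struct my_cmd __user *ucmd = (const void __user *)data;
		struct my_cmd cmd;
		size_t consumed = sizeof(cmd.type), copy;

		if (count < consumed)
			return -EINVAL;
		if (copy_from_user(&cmd.type, &ucmd->type, consumed))
			return -EFAULT;

		switch (cmd.type) {
		case 1:	/* hypothetical MY_CMD_SET_PART_KEY */
			copy = sizeof(cmd.cmd.part_key);
			break;
		case 2:	/* hypothetical MY_CMD_ACK_EVENT */
			copy = sizeof(cmd.cmd.event_mask);
			break;
		default:
			return -EINVAL;
		}

		/* Stage two: the payload must fit in what the caller wrote. */
		if (count - consumed < copy)
			return -EINVAL;
		if (copy_from_user(&cmd.cmd, &ucmd->cmd, copy))
			return -EFAULT;

		/* ... dispatch on cmd.type ... */
		return count;
	}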
/* in qib_write_iter(): */
	struct qib_filedata *fp = iocb->ki_filp->private_data;
	struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
	struct qib_user_sdma_queue *pq = fp->pq;

	if (!user_backed_iter(from) || !from->nr_segs || !pq)
		return -EINVAL;

	return qib_user_sdma_writev(rcd, pq, iter_iov(from), from->nr_segs);
/* excerpts from qib_cdev_init(): */
	ret = -ENOMEM;

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

		minor, name, -ret);

		minor, name, -ret);
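qib_cdev_init() is the stock char-device bring-up: cdev_alloc(), set owner/ops/name, cdev_add() at the chosen minor, then device_create() so udev makes the /dev node. A hedged sketch of that sequence, assuming the class and dev_t base come from class_create() and alloc_chrdev_region() elsewhere:

	#include <linux/cdev.h>
	#include <linux/device.h>
	#include <linux/fs.h>

	static int my_cdev_init(struct class *cls, dev_t base, int minor,
				const char *name,
				const struct file_operations *fops,
				struct cdev **cdevp, struct device **devp)
	{
		dev_t devt = MKDEV(MAJOR(base), minor);
		struct cdev *cdev;
		struct device *device;
		int ret;

		cdev = cdev_alloc();
		if (!cdev)
			return -ENOMEM;
		cdev->owner = THIS_MODULE;
		cdev->ops = fops;
		kobject_set_name(&cdev->kobj, name);

		ret = cdev_add(cdev, devt, 1);
		if (ret < 0)
			goto err_del;

		/* Registers with the driver core; udev creates /dev/<name>. */
		device = device_create(cls, NULL, devt, NULL, "%s", name);
		if (IS_ERR(device)) {
			ret = PTR_ERR(device);
			goto err_del;
		}

		*cdevp = cdev;
		*devp = device;
		return 0;

	err_del:
		cdev_del(cdev);
		return ret;
	}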
/* excerpts from qib_dev_init(): */
	pr_err("Could not allocate chrdev region (err %d)\n", -ret);

	pr_err("Could not create device class (err %d)\n", -ret);
/* in qib_user_remove(): */
	qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
/* excerpts from qib_user_add(): */
	snprintf(name, sizeof(name), "ipath%d", dd->unit);
	ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
			    &dd->user_cdev, &dd->user_device);
/*
 * Create per-unit files in /dev
 */

/*
 * Remove per-unit files in /dev
 */