1 /*-
2 * Copyright (c) 1994 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 * John S. Dyson.
16 * 4. Modifications may be freely made to this file if the above conditions
17 * are met.
18 */
19
20 #include <sys/param.h>
21 #include <sys/systm.h>
22 #include <sys/bio.h>
23 #include <sys/buf.h>
24 #include <sys/conf.h>
25 #include <sys/malloc.h>
26 #include <sys/proc.h>
27 #include <sys/racct.h>
28 #include <sys/rwlock.h>
29 #include <sys/uio.h>
30 #include <geom/geom.h>
31
32 #include <vm/vm.h>
33 #include <vm/vm_object.h>
34 #include <vm/vm_page.h>
35 #include <vm/vm_pager.h>
36 #include <vm/vm_extern.h>
37 #include <vm/vm_map.h>
38
/*
 * physio() -- perform "raw" I/O directly between a character device and
 * the buffers described by 'uio', bypassing the buffer cache.  Each iovec
 * is carved into chunks no larger than the device's si_iosize_max (and
 * maxphys), each chunk is wired in place (user-space case), submitted to
 * the driver's d_strategy routine via a struct bio, and waited on
 * synchronously.
 *
 * Parameters:
 *   dev    - character device to do I/O against.
 *   uio    - describes the user/kernel buffers, direction, and offset;
 *            iov/resid/offset are advanced as I/O completes.
 *   ioflag - currently unused by this routine.
 *
 * Returns 0 on success (or on a short read/write at EOF), ENXIO if the
 * device is going away, EFBIG for an unsplittable oversized request,
 * EFAULT if the user pages could not be wired, or the driver-reported
 * bio_error.
 */
int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *csw;
	struct buf *pbuf;	/* pager buf providing KVA for the mapped case */
	struct bio *bp;		/* single bio, reset and reused per chunk */
	struct vm_page **pages;	/* wired user pages; NULL for kernel-space I/O */
	char *base, *sa;	/* sa: KVA window from pbuf, if any */
	u_int iolen, poff;
	int error, i, npages, maxpages;
	vm_prot_t prot;

	csw = dev->si_devsw;
	npages = 0;
	sa = NULL;
	/* check if character device is being destroyed */
	if (csw == NULL)
		return (ENXIO);

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/*
	 * If the driver does not want I/O to be split, that means that we
	 * need to reject any requests that will not fit into one buffer.
	 */
	if (dev->si_flags & SI_NOSPLIT &&
	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > maxphys ||
	    uio->uio_iovcnt > 1)) {
		/*
		 * Tell the user why his I/O was rejected.
		 */
		if (uio->uio_resid > dev->si_iosize_max)
			uprintf("%s: request size=%zd > si_iosize_max=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, dev->si_iosize_max);
		if (uio->uio_resid > maxphys)
			uprintf("%s: request size=%zd > maxphys=%lu; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, maxphys);
		if (uio->uio_iovcnt > 1)
			uprintf("%s: request vectors=%d > 1; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_iovcnt);
		return (EFBIG);
	}

	/*
	 * Pick the buffering strategy:
	 *  - kernel-space uio: the data is already mapped; no pages array
	 *    and no pbuf needed.
	 *  - user-space uio on a device that accepts unmapped I/O: hold the
	 *    user pages and hand them to the driver via bio_ma, avoiding a
	 *    kernel mapping entirely.
	 *  - otherwise: borrow a pager buf (pbuf) whose KVA window will be
	 *    used to map the held user pages for the driver.
	 */
	bp = g_alloc_bio();
	if (uio->uio_segflg != UIO_USERSPACE) {
		pbuf = NULL;
		pages = NULL;
	} else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
		pbuf = NULL;
		/* +1 page because the buffer may straddle a page boundary. */
		maxpages = btoc(MIN(uio->uio_resid, maxphys)) + 1;
		pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
	} else {
		pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
		MPASS((pbuf->b_flags & B_MAXPHYS) != 0);
		sa = pbuf->b_data;
		maxpages = PBUF_PAGES;
		pages = pbuf->b_pages;
	}
	/*
	 * A device read writes into the user's pages, so wiring them needs
	 * write permission; a device write only needs to read them.
	 */
	prot = VM_PROT_READ;
	if (uio->uio_rw == UIO_READ)
		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
	error = 0;
	for (i = 0; i < uio->uio_iovcnt; i++) {
#ifdef RACCT
		/*
		 * Charge the whole iovec to the process's resource
		 * accounting up front, before issuing any chunks of it.
		 */
		if (racct_enable) {
			PROC_LOCK(curproc);
			switch (uio->uio_rw) {
			case UIO_READ:
				racct_add_force(curproc, RACCT_READBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_READIOPS, 1);
				break;
			case UIO_WRITE:
				racct_add_force(curproc, RACCT_WRITEBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
				break;
			}
			PROC_UNLOCK(curproc);
		}
#endif /* RACCT */

		/*
		 * Issue the iovec as a sequence of chunks, each clamped to
		 * the device and system transfer limits.
		 */
		while (uio->uio_iov[i].iov_len) {
			g_reset_bio(bp);
			switch (uio->uio_rw) {
			case UIO_READ:
				bp->bio_cmd = BIO_READ;
				curthread->td_ru.ru_inblock++;
				break;
			case UIO_WRITE:
				bp->bio_cmd = BIO_WRITE;
				curthread->td_ru.ru_oublock++;
				break;
			}
			bp->bio_offset = uio->uio_offset;
			base = uio->uio_iov[i].iov_base;
			bp->bio_length = uio->uio_iov[i].iov_len;
			if (bp->bio_length > dev->si_iosize_max)
				bp->bio_length = dev->si_iosize_max;
			if (bp->bio_length > maxphys)
				bp->bio_length = maxphys;
			bp->bio_bcount = bp->bio_length;
			bp->bio_dev = dev;

			if (pages) {
				/*
				 * Fault in and wire the user pages backing
				 * this chunk so the driver can DMA to/from
				 * them safely.
				 */
				if ((npages = vm_fault_quick_hold_pages(
				    &curproc->p_vmspace->vm_map,
				    (vm_offset_t)base, bp->bio_length,
				    prot, pages, maxpages)) < 0) {
					error = EFAULT;
					goto doerror;
				}
				poff = (vm_offset_t)base & PAGE_MASK;
				if (pbuf && sa) {
					/*
					 * Mapped path: enter the held pages
					 * into the pbuf's KVA window.
					 */
					pmap_qenter((vm_offset_t)sa,
					    pages, npages);
					bp->bio_data = sa + poff;
				} else {
					/* Unmapped path: pass raw pages. */
					bp->bio_ma = pages;
					bp->bio_ma_n = npages;
					bp->bio_ma_offset = poff;
					bp->bio_data = unmapped_buf;
					bp->bio_flags |= BIO_UNMAPPED;
				}
			} else
				/* Kernel-space buffer is already mapped. */
				bp->bio_data = base;

			csw->d_strategy(bp);
			if (uio->uio_rw == UIO_READ)
				biowait(bp, "physrd");
			else
				biowait(bp, "physwr");

			/* Undo any mapping and release the page holds. */
			if (pages) {
				if (pbuf)
					pmap_qremove((vm_offset_t)sa, npages);
				vm_page_unhold_pages(pages, npages);
			}

			/*
			 * Advance the uio by the amount actually
			 * transferred; a zero-length transfer without an
			 * error flag means EOF, which terminates the loop
			 * without reporting an error.
			 */
			iolen = bp->bio_length - bp->bio_resid;
			if (iolen == 0 && !(bp->bio_flags & BIO_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->bio_flags & BIO_ERROR) {
				error = bp->bio_error;
				goto doerror;
			}
		}
	}
doerror:
	/* Common exit: release whichever buffering resources were taken. */
	if (pbuf)
		uma_zfree(pbuf_zone, pbuf);
	else if (pages)
		free(pages, M_DEVBUF);
	g_destroy_bio(bp);
	return (error);
}
208