/src/tools/test/stress2/misc/
select.sh
  134: int e, i, j, pids[PARALLEL], r, status;
  136: lines = LINES / PARALLEL;
  145: if ((r = pthread_barrier_init(&barr, &attr, PARALLEL)) != 0)
  150: for (i = 0; i < PARALLEL; i++) {
  154: for (i = 0; i < PARALLEL; i++) {
  158: for (j = i + 1; j < PARALLEL; j++)
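select.sh stands out from the other stress2 tests in this directory: it releases its workers with a pthread barrier rather than a shared counter. A minimal sketch of that pthread_barrier_init()/pthread_barrier_wait() rendezvous follows; the worker body and PARALLEL value are illustrative, and where select.sh passes a barrier attribute (&attr) the sketch passes NULL.

#include <err.h>
#include <pthread.h>
#include <stdio.h>

#define PARALLEL 4              /* assumed worker count, not select.sh's */

static pthread_barrier_t barr;

static void *
worker(void *arg)
{
        int id = *(int *)arg;

        /* Block until all PARALLEL threads have arrived. */
        (void)pthread_barrier_wait(&barr);
        printf("worker %d released\n", id);
        return (NULL);
}

int
main(void)
{
        pthread_t tid[PARALLEL];
        int i, ids[PARALLEL], r;

        if ((r = pthread_barrier_init(&barr, NULL, PARALLEL)) != 0)
                errc(1, r, "pthread_barrier_init");
        for (i = 0; i < PARALLEL; i++) {
                ids[i] = i;
                if ((r = pthread_create(&tid[i], NULL, worker, &ids[i])) != 0)
                        errc(1, r, "pthread_create");
        }
        for (i = 0; i < PARALLEL; i++)
                pthread_join(tid[i], NULL);
        pthread_barrier_destroy(&barr);
        return (0);
}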
|
kevent15.sh
  95: while (atomic_load(&share[SYNC]) != PARALLEL)
  121: pid_t pids[PARALLEL];
  134: for (i = 0; i < PARALLEL; i++) {
  140: while (share[ACT] != PARALLEL)
  143: for (i = 0; i < PARALLEL; i++) {
  148: for (i = 0; i < PARALLEL; i++) {
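kevent15.sh shows the spin-gate idiom that nearly every stress2 test in this directory repeats: fork PARALLEL children, have each bump a counter in a shared anonymous mapping and spin until the counter reaches PARALLEL so all children start at once, then reap them with waitpid(). A minimal self-contained sketch, with the names share, SYNC, and test() mirroring the snippets; C11 atomics stand in for the volatile accesses the tests actually use, and the workload is a stub.

#include <sys/mman.h>
#include <sys/wait.h>

#include <err.h>
#include <stdatomic.h>
#include <unistd.h>

#define PARALLEL 4              /* assumed; each test picks its own value */
#define SYNC 0

static _Atomic(unsigned int) *share;

static void
test(void)
{
        atomic_fetch_add(&share[SYNC], 1);
        while (atomic_load(&share[SYNC]) != PARALLEL)
                ;               /* spin until all children have checked in */
        /* The real stress workload would run here. */
        _exit(0);
}

int
main(void)
{
        pid_t pids[PARALLEL];
        int i, status;

        share = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_SHARED, -1, 0);
        if (share == MAP_FAILED)
                err(1, "mmap");
        for (i = 0; i < PARALLEL; i++) {
                if ((pids[i] = fork()) == 0)
                        test();
                if (pids[i] == -1)
                        err(1, "fork");
        }
        for (i = 0; i < PARALLEL; i++)
                if (waitpid(pids[i], &status, 0) != pids[i])
                        err(1, "waitpid");
        return (0);
}

The busy-wait is deliberate: these tests want every child to hit the kernel at the same instant, and a blocking primitive would smear the start times.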
|
freepages.sh
  80: while (share[SYNC] != PARALLEL)
  84: len = atol(s) / PARALLEL;
  101: pid_t pids[PARALLEL];
  111: for (i = 0; i < PARALLEL; i++) {
  117: for (i = 0; i < PARALLEL; i++) {
|
advlock.sh
  119: while (share[SYNC] != PARALLEL)
  139: while (share[SYNC] != PARALLEL)
  162: while (share[SYNC] != PARALLEL)
  183: while (share[SYNC] != PARALLEL)
  220: for (i = 0; i < PARALLEL; i++) {
  233: for (i = 0; i < PARALLEL; i++) {
|
indir.sh
  164: pid_t pids[PARALLEL];
  173: files = atol(argv[2]) / PARALLEL;
  175: files, PARALLEL, files * PARALLEL);
  180: for (i = 0; i < PARALLEL; i++) {
  186: for (i = 0; i < PARALLEL; i++) {
|
mmap31.sh
  123: while (share[SYNC] != PARALLEL)
  130: while (share[SYNC] != PARALLEL)
  146: pid_t pids[PARALLEL];
  160: for (i = 0; i < PARALLEL; i++) {
  166: for (i = 0; i < PARALLEL; i++) {
|
rename10.sh
  160: pid_t pids[PARALLEL], spids[PARALLEL];
  163: for (i = 0; i < PARALLEL; i++) {
  172: for (i = 0; i < PARALLEL; i++) {
  178: for (i = 0; i < PARALLEL * 2; i++)
|
laundry.sh
  103: while (share[SYNC] != PARALLEL)
  128: pid_t pids[PARALLEL];
  135: size /= PARALLEL;
  145: for (i = 0; i < PARALLEL; i++) {
  151: for (i = 0; i < PARALLEL; i++) {
|
kevent11.sh
  118: while (share[SYNC] != PARALLEL)
  133: pid_t pids[PARALLEL];
  151: for (i = 0; i < PARALLEL; i++) {
  158: while (share[SYNC] != PARALLEL)
  163: for (i = 0; i < PARALLEL; i++) {
|
fsgs.sh
  85: while (share[SYNC] != PARALLEL)
  107: pid_t pids[PARALLEL];
  121: for (i = 0; i < PARALLEL; i++) {
  127: for (i = 0; i < PARALLEL; i++) {
|
suj5.sh
  159: if (bl > (int64_t)INT_MAX * PARALLEL)
  160: bl = (int64_t)INT_MAX * PARALLEL;
  161: size = bl / PARALLEL / 1024;
  170: for (j = 0; j < PARALLEL; j++) {
  175: for (j = 0; j < PARALLEL; j++)
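suj5.sh (and suj6.sh below) clamp their total byte budget so that each worker's share still fits in an int before converting it to a per-worker size in KiB. A worked example of that arithmetic with a made-up 10 GiB budget; only the clamp and division are taken from the snippets.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PARALLEL 3              /* assumed worker count */

int
main(void)
{
        int64_t bl, size;

        bl = 10LL * 1024 * 1024 * 1024;           /* made-up budget: 10 GiB */
        if (bl > (int64_t)INT_MAX * PARALLEL)
                bl = (int64_t)INT_MAX * PARALLEL; /* per-worker share <= INT_MAX */
        size = bl / PARALLEL / 1024;              /* per-worker size in KiB */
        printf("%jd KiB per worker\n", (intmax_t)size);
        return (0);
}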
|
suj6.sh
  161: if (bl > (int64_t)INT_MAX * PARALLEL)
  162: bl = (int64_t)INT_MAX * PARALLEL;
  163: size = bl / PARALLEL / 1024;
  172: for (j = 0; j < PARALLEL; j++) {
  177: for (j = 0; j < PARALLEL; j++)
|
pipe3.sh
  108: pid_t pids[PARALLEL], rpid;
  113: for (i = 0; i < PARALLEL; i++)
  119: for (i = 0; i < PARALLEL; i++) {
  128: for (i = 0; i < PARALLEL; i++) {
|
nfs15.sh
  172: pid_t pids[PARALLEL];
  184: for (i = 0; i < PARALLEL; i++) {
  192: for (i = 0; i < PARALLEL; i++)
  195: for (i = 0; i < PARALLEL; i++)
  205: for (i = 0; i < PARALLEL; i++) {
|
nfs16.sh
  156: pid_t pids[PARALLEL];
  168: for (i = 0; i < PARALLEL; i++) {
  176: for (i = 0; i < PARALLEL; i++)
  179: for (i = 0; i < PARALLEL; i++)
  189: for (i = 0; i < PARALLEL; i++) {
|
ffs_sync.sh
  171: for (i = 0; i < PARALLEL; i++)
  181: for (i = 0; i < PARALLEL; i++)
  191: for (i = 0; i < PARALLEL; i++)
  230: for (i = 0; i < PARALLEL; i++)
  245: for (i = 0; i < PARALLEL; i++) {
|
tmpfs17.sh
  103: while (share[SYNC] != PARALLEL)
  125: int e, i, pids[PARALLEL], status;
  133: for (i = 0; i < PARALLEL; i++) {
  137: for (i = 0; i < PARALLEL; i++) {
|
getrandom.sh
  99: while (share[SYNC] != PARALLEL)
  113: pid_t pids[PARALLEL];
  127: for (i = 0; i < PARALLEL; i++) {
  133: for (i = 0; i < PARALLEL; i++) {
|
pause.sh
  90: while (share[SYNC] != PARALLEL)
  118: pid_t pids[PARALLEL];
  139: for (i = 0; i < PARALLEL; i++) {
  145: for (i = 0; i < PARALLEL; i++) {
|
sigstop2.sh
  80: while (share[SYNC] != PARALLEL)
  110: pid_t pids[PARALLEL];
  124: for (i = 0; i < PARALLEL; i++) {
  130: for (i = 0; i < PARALLEL; i++) {
|
rmdir.sh
  45: while (share[SYNC] != PARALLEL)
  60: pid_t pids[PARALLEL];
  74: for (i = 0; i < PARALLEL; i++) {
  80: for (i = 0; i < PARALLEL; i++) {
|
sendfile16.sh
  118: while (share[SYNC] != PARALLEL)
  179: while (share[SYNC2] != PARALLEL)
  188: pid_t pids[PARALLEL];
  201: for (i = 0; i < PARALLEL; i++) {
  207: for (i = 0; i < PARALLEL; i++) {
|
/src/tools/test/stress2/tools/
bench.c
  45: #define PARALLEL 3  [macro]
  68: while (share[SYNC] != (volatile u_int)tests * PARALLEL)  [in cr1()]
  99: while (share[SYNC] != (volatile u_int)tests * PARALLEL)  [in cr2()]
  127: while (share[SYNC] != (volatile u_int)tests * PARALLEL)  [in cr3()]
  160: while (share[SYNC] != (volatile u_int)tests * PARALLEL)  [in rn1()]
  193: while (share[SYNC] != (volatile u_int)tests * PARALLEL)  [in rw1()]
  233: while (share[SYNC] != (volatile u_int)tests * PARALLEL)  [in rw2()]
  266: pid_t pids[PARALLEL];  [in spawn()]
  276: for (i = 0; i < PARALLEL; i++) {  [in spawn()]
  284: for (i = 0; i < PARALLEL; i++) {  [in spawn()]
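bench.c's spawn() is the distilled form of the pattern: run one benchmark function (cr1() through rw2()) in PARALLEL forked children and reap them, while the benchmarks themselves gate on share[SYNC] reaching tests * PARALLEL across repeated runs. A sketch of the spawn() shape; the function-pointer dispatch is an assumption about how the benchmarks are invoked, and the benchmark body is a stub.

#include <sys/wait.h>

#include <err.h>
#include <unistd.h>

#define PARALLEL 3              /* matches the macro on line 45 above */

static void
cr1(void)
{
        /* One benchmark body; bench.c has cr1()..cr3(), rn1(), rw1(), rw2(). */
        _exit(0);
}

static void
spawn(void (*func)(void))
{
        pid_t pids[PARALLEL];
        int i, status;

        for (i = 0; i < PARALLEL; i++) {
                if ((pids[i] = fork()) == 0)
                        func();         /* child never returns; it _exit()s */
                if (pids[i] == -1)
                        err(1, "fork");
        }
        for (i = 0; i < PARALLEL; i++)
                if (waitpid(pids[i], &status, 0) != pids[i])
                        err(1, "waitpid");
}

int
main(void)
{
        spawn(cr1);
        return (0);
}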
|
/src/sys/contrib/device-tree/Bindings/media/i2c/
st,st-mipid02.txt
  1: STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge
  5: through PARALLEL output port.
  9: PARALLEL output port has a maximum width of 12 bits.
  34: 2 PARALLEL output
  42: Endpoint node required property for PARALLEL connection is:
  44: Endpoint node optional properties for PARALLEL connection are:
|
/src/tests/sys/kern/
kern_descrip_test.c
  50: #define PARALLEL 4  [macro]
  126: for (i = 0; i < PARALLEL; i++)  [in openfiles()]
  128: openfiles2(n / PARALLEL);  [in openfiles()]
  129: while (done != PARALLEL) {  [in openfiles()]
  135: for (i = 0; i < PARALLEL; i++)  [in openfiles()]
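kern_descrip_test.c splits its target of n open descriptors evenly: each of PARALLEL workers opens n / PARALLEL files while the caller spins until a shared done counter reaches PARALLEL. A sketch of that fan-out, using threads and /dev/null descriptors purely for illustration; the real test is an ATF test case with its own plumbing.

#include <err.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define PARALLEL 4              /* matches the macro on line 50 above */

static atomic_int done;

static void *
openfiles2(void *arg)
{
        int i, n = *(int *)arg;

        for (i = 0; i < n; i++)
                if (open("/dev/null", O_RDONLY) == -1)
                        err(1, "open");
        atomic_fetch_add(&done, 1);
        return (NULL);
}

int
main(void)
{
        pthread_t tid[PARALLEL];
        int i, n, r, share;

        n = 100;                /* made-up total descriptor target */
        share = n / PARALLEL;
        for (i = 0; i < PARALLEL; i++)
                if ((r = pthread_create(&tid[i], NULL, openfiles2,
                    &share)) != 0)
                        errc(1, r, "pthread_create");
        while (atomic_load(&done) != PARALLEL)
                ;               /* wait until every worker has finished */
        printf("opened %d descriptors\n", share * PARALLEL);
        for (i = 0; i < PARALLEL; i++)
                pthread_join(tid[i], NULL);
        return (0);
}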
|