1 /*
2  *	SGI UltraViolet TLB flush routines.
3  *
4  *	(c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
5  *
6  *	This code is released under the GNU General Public License version 2 or
7  *	later.
8  */
9 #include <linux/seq_file.h>
10 #include <linux/proc_fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/delay.h>
15 
16 #include <asm/mmu_context.h>
17 #include <asm/uv/uv.h>
18 #include <asm/uv/uv_mmrs.h>
19 #include <asm/uv/uv_hub.h>
20 #include <asm/uv/uv_bau.h>
21 #include <asm/apic.h>
22 #include <asm/idle.h>
23 #include <asm/tsc.h>
24 #include <asm/irq_vectors.h>
25 #include <asm/timer.h>
26 
27 /* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
28 static int timeout_base_ns[] = {
29 		20,
30 		160,
31 		1280,
32 		10240,
33 		81920,
34 		655360,
35 		5242880,
36 		167772160
37 };
38 
39 static int timeout_us;
40 static int nobau;
41 static int baudisabled;
42 static DEFINE_SPINLOCK(disable_lock);
43 static cycles_t congested_cycles;
44 
45 /* tunables: */
46 static int max_concurr		= MAX_BAU_CONCURRENT;
47 static int max_concurr_const	= MAX_BAU_CONCURRENT;
48 static int plugged_delay	= PLUGGED_DELAY;
49 static int plugsb4reset		= PLUGSB4RESET;
50 static int timeoutsb4reset	= TIMEOUTSB4RESET;
51 static int ipi_reset_limit	= IPI_RESET_LIMIT;
52 static int complete_threshold	= COMPLETE_THRESHOLD;
53 static int congested_respns_us	= CONGESTED_RESPONSE_US;
54 static int congested_reps	= CONGESTED_REPS;
55 static int congested_period	= CONGESTED_PERIOD;
56 
57 static struct tunables tunables[] = {
58 	{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
59 	{&plugged_delay, PLUGGED_DELAY},
60 	{&plugsb4reset, PLUGSB4RESET},
61 	{&timeoutsb4reset, TIMEOUTSB4RESET},
62 	{&ipi_reset_limit, IPI_RESET_LIMIT},
63 	{&complete_threshold, COMPLETE_THRESHOLD},
64 	{&congested_respns_us, CONGESTED_RESPONSE_US},
65 	{&congested_reps, CONGESTED_REPS},
66 	{&congested_period, CONGESTED_PERIOD}
67 };
68 
69 static struct dentry *tunables_dir;
70 static struct dentry *tunables_file;
71 
72 /* these correspond to the statistics printed by ptc_seq_show() */
73 static char *stat_description[] = {
74 	"sent:     number of shootdown messages sent",
75 	"stime:    time spent sending messages",
76 	"numuvhubs: number of hubs targeted with shootdown",
77 	"numuvhubs16: number times 16 or more hubs targeted",
78 	"numuvhubs8: number times 8 or more hubs targeted",
79 	"numuvhubs4: number times 4 or more hubs targeted",
80 	"numuvhubs2: number times 2 or more hubs targeted",
81 	"numuvhubs1: number times 1 hub targeted",
82 	"numcpus:  number of cpus targeted with shootdown",
83 	"dto:      number of destination timeouts",
84 	"retries:  destination timeout retries sent",
85 	"rok:   :  destination timeouts successfully retried",
86 	"resetp:   ipi-style resource resets for plugs",
87 	"resett:   ipi-style resource resets for timeouts",
88 	"giveup:   fall-backs to ipi-style shootdowns",
89 	"sto:      number of source timeouts",
90 	"bz:       number of stay-busy's",
91 	"throt:    number times spun in throttle",
92 	"swack:   image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
93 	"recv:     shootdown messages received",
94 	"rtime:    time spent processing messages",
95 	"all:      shootdown all-tlb messages",
96 	"one:      shootdown one-tlb messages",
97 	"mult:     interrupts that found multiple messages",
98 	"none:     interrupts that found no messages",
99 	"retry:    number of retry messages processed",
100 	"canc:     number messages canceled by retries",
101 	"nocan:    number retries that found nothing to cancel",
102 	"reset:    number of ipi-style reset requests processed",
103 	"rcan:     number messages canceled by reset requests",
104 	"disable:  number times use of the BAU was disabled",
105 	"enable:   number times use of the BAU was re-enabled"
106 };
107 
108 static int __init
109 setup_nobau(char *arg)
110 {
111 	nobau = 1;
112 	return 0;
113 }
114 early_param("nobau", setup_nobau);
115 
116 /* base pnode in this partition */
117 static int uv_base_pnode __read_mostly;
118 
119 static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
120 static DEFINE_PER_CPU(struct bau_control, bau_control);
121 static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
122 
123 /*
124  * Determine the first node on a uvhub. 'Nodes' are used for kernel
125  * memory allocation.
126  */
127 static int __init uvhub_to_first_node(int uvhub)
128 {
129 	int node, b;
130 
131 	for_each_online_node(node) {
132 		b = uv_node_to_blade_id(node);
133 		if (uvhub == b)
134 			return node;
135 	}
136 	return -1;
137 }
138 
139 /*
140  * Determine the apicid of the first cpu on a uvhub.
141  */
142 static int __init uvhub_to_first_apicid(int uvhub)
143 {
144 	int cpu;
145 
146 	for_each_present_cpu(cpu)
147 		if (uvhub == uv_cpu_to_blade_id(cpu))
148 			return per_cpu(x86_cpu_to_apicid, cpu);
149 	return -1;
150 }
151 
152 /*
153  * Free a software acknowledge hardware resource by clearing its Pending
154  * bit. This will return a reply to the sender.
155  * If the message has timed out, a reply has already been sent by the
156  * hardware but the resource has not been released. In that case our
157  * clear of the Timeout bit (as well) will free the resource. No reply will
158  * be sent (the hardware will only do one reply per message).
159  */
160 static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
161 						int do_acknowledge)
162 {
163 	unsigned long dw;
164 	struct bau_pq_entry *msg;
165 
166 	msg = mdp->msg;
167 	if (!msg->canceled && do_acknowledge) {
168 		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
169 		write_mmr_sw_ack(dw);
170 	}
171 	msg->replied_to = 1;
172 	msg->swack_vec = 0;
173 }
174 
175 /*
176  * Process the receipt of a RETRY message
177  */
178 static void bau_process_retry_msg(struct msg_desc *mdp,
179 					struct bau_control *bcp)
180 {
181 	int i;
182 	int cancel_count = 0;
183 	unsigned long msg_res;
184 	unsigned long mmr = 0;
185 	struct bau_pq_entry *msg = mdp->msg;
186 	struct bau_pq_entry *msg2;
187 	struct ptc_stats *stat = bcp->statp;
188 
189 	stat->d_retries++;
190 	/*
191 	 * cancel any message from msg+1 to the retry itself
192 	 */
193 	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
194 		if (msg2 > mdp->queue_last)
195 			msg2 = mdp->queue_first;
196 		if (msg2 == msg)
197 			break;
198 
199 		/* same conditions for cancellation as do_reset */
200 		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
201 		    (msg2->swack_vec) && ((msg2->swack_vec &
202 			msg->swack_vec) == 0) &&
203 		    (msg2->sending_cpu == msg->sending_cpu) &&
204 		    (msg2->msg_type != MSG_NOOP)) {
205 			mmr = read_mmr_sw_ack();
206 			msg_res = msg2->swack_vec;
207 			/*
208 			 * This is a message retry; clear the resources held
209 			 * by the previous message only if they timed out.
210 			 * If it has not timed out we have an unexpected
211 			 * situation to report.
212 			 */
213 			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
214 				unsigned long mr;
215 				/*
216 				 * Is the resource timed out?
217 				 * Make everyone ignore the cancelled message.
218 				 */
219 				msg2->canceled = 1;
220 				stat->d_canceled++;
221 				cancel_count++;
222 				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
223 				write_mmr_sw_ack(mr);
224 			}
225 		}
226 	}
227 	if (!cancel_count)
228 		stat->d_nocanceled++;
229 }
230 
231 /*
232  * Do all the things a cpu should do for a TLB shootdown message.
233  * Other cpu's may come here at the same time for this message.
234  */
235 static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
236 						int do_acknowledge)
237 {
238 	short socket_ack_count = 0;
239 	short *sp;
240 	struct atomic_short *asp;
241 	struct ptc_stats *stat = bcp->statp;
242 	struct bau_pq_entry *msg = mdp->msg;
243 	struct bau_control *smaster = bcp->socket_master;
244 
245 	/*
246 	 * This must be a normal message, or retry of a normal message
247 	 */
248 	if (msg->address == TLB_FLUSH_ALL) {
249 		local_flush_tlb();
250 		stat->d_alltlb++;
251 	} else {
252 		__flush_tlb_one(msg->address);
253 		stat->d_onetlb++;
254 	}
255 	stat->d_requestee++;
256 
257 	/*
258 	 * One cpu on each uvhub has the additional job on a RETRY
259 	 * of releasing the resource held by the message that is
260 	 * being retried.  That message is identified by sending
261 	 * cpu number.
262 	 */
263 	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
264 		bau_process_retry_msg(mdp, bcp);
265 
266 	/*
267 	 * This is a swack message, so we have to reply to it.
268 	 * Count each responding cpu on the socket. This avoids
269 	 * pinging the count's cache line back and forth between
270 	 * the sockets.
271 	 */
272 	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
273 	asp = (struct atomic_short *)sp;
274 	socket_ack_count = atom_asr(1, asp);
275 	if (socket_ack_count == bcp->cpus_in_socket) {
276 		int msg_ack_count;
277 		/*
278 		 * Both sockets dump their completed count total into
279 		 * the message's count.
280 		 */
281 		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
282 		asp = (struct atomic_short *)&msg->acknowledge_count;
283 		msg_ack_count = atom_asr(socket_ack_count, asp);
284 
285 		if (msg_ack_count == bcp->cpus_in_uvhub) {
286 			/*
287 			 * All cpus in uvhub saw it; reply
288 			 * (unless we are in the UV2 workaround)
289 			 */
290 			reply_to_message(mdp, bcp, do_acknowledge);
291 		}
292 	}
293 
294 	return;
295 }
296 
297 /*
298  * Determine the first cpu on a pnode.
299  */
300 static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
301 {
302 	int cpu;
303 	struct hub_and_pnode *hpp;
304 
305 	for_each_present_cpu(cpu) {
306 		hpp = &smaster->thp[cpu];
307 		if (pnode == hpp->pnode)
308 			return cpu;
309 	}
310 	return -1;
311 }
312 
313 /*
314  * Last resort when we get a large number of destination timeouts is
315  * to clear resources held by a given cpu.
316  * Do this with IPI so that all messages in the BAU message queue
317  * can be identified by their nonzero swack_vec field.
318  *
319  * This is entered for a single cpu on the uvhub.
320  * The sender wants this uvhub to free a specific message's
321  * swack resources.
322  */
323 static void do_reset(void *ptr)
324 {
325 	int i;
326 	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
327 	struct reset_args *rap = (struct reset_args *)ptr;
328 	struct bau_pq_entry *msg;
329 	struct ptc_stats *stat = bcp->statp;
330 
331 	stat->d_resets++;
332 	/*
333 	 * We're looking for the given sender, and
334 	 * will free its swack resource.
335 	 * If all cpu's finally responded after the timeout, its
336 	 * message 'replied_to' was set.
337 	 */
338 	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
339 		unsigned long msg_res;
340 		/* do_reset: same conditions for cancellation as
341 		   bau_process_retry_msg() */
342 		if ((msg->replied_to == 0) &&
343 		    (msg->canceled == 0) &&
344 		    (msg->sending_cpu == rap->sender) &&
345 		    (msg->swack_vec) &&
346 		    (msg->msg_type != MSG_NOOP)) {
347 			unsigned long mmr;
348 			unsigned long mr;
349 			/*
350 			 * make everyone else ignore this message
351 			 */
352 			msg->canceled = 1;
353 			/*
354 			 * only reset the resource if it is still pending
355 			 */
356 			mmr = read_mmr_sw_ack();
357 			msg_res = msg->swack_vec;
358 			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
359 			if (mmr & msg_res) {
360 				stat->d_rcanceled++;
361 				write_mmr_sw_ack(mr);
362 			}
363 		}
364 	}
365 	return;
366 }
367 
368 /*
369  * Use IPI to get all target uvhubs to release resources held by
370  * a given sending cpu number.
371  */
372 static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
373 {
374 	int pnode;
375 	int apnode;
376 	int maskbits;
377 	int sender = bcp->cpu;
378 	cpumask_t *mask = bcp->uvhub_master->cpumask;
379 	struct bau_control *smaster = bcp->socket_master;
380 	struct reset_args reset_args;
381 
382 	reset_args.sender = sender;
383 	cpus_clear(*mask);
384 	/* find a single cpu for each uvhub in this distribution mask */
385 	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
386 	/* each bit is a pnode relative to the partition base pnode */
387 	for (pnode = 0; pnode < maskbits; pnode++) {
388 		int cpu;
389 		if (!bau_uvhub_isset(pnode, distribution))
390 			continue;
391 		apnode = pnode + bcp->partition_base_pnode;
392 		cpu = pnode_to_first_cpu(apnode, smaster);
393 		cpu_set(cpu, *mask);
394 	}
395 
396 	/* IPI all cpus; preemption is already disabled */
397 	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
398 	return;
399 }
400 
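/*
 * Convert a cycle count to microseconds using this cpu's cyc2ns
 * scaling factor.
 */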
401 static inline unsigned long cycles_2_us(unsigned long long cyc)
402 {
403 	unsigned long long ns;
404 	unsigned long us;
405 	int cpu = smp_processor_id();
406 
407 	ns =  (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
408 	us = ns / 1000;
409 	return us;
410 }
411 
412 /*
413  * wait for all cpus on this hub to finish their sends and go quiet
414  * leaves uvhub_quiesce set so that no new broadcasts are started by
415  * bau_flush_send_and_wait()
416  */
417 static inline void quiesce_local_uvhub(struct bau_control *hmaster)
418 {
419 	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
420 }
421 
422 /*
423  * mark this quiet-requestor as done
424  */
425 static inline void end_uvhub_quiesce(struct bau_control *hmaster)
426 {
427 	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
428 }
429 
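/*
 * Read the UV1 activation status MMR and extract this descriptor's
 * 2-bit status field (selected by right_shift).
 */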
430 static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
431 {
432 	unsigned long descriptor_status;
433 
434 	descriptor_status = uv_read_local_mmr(mmr_offset);
435 	descriptor_status >>= right_shift;
436 	descriptor_status &= UV_ACT_STATUS_MASK;
437 	return descriptor_status;
438 }
439 
440 /*
441  * Wait for completion of a broadcast software ack message
442  * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
443  */
444 static int uv1_wait_completion(struct bau_desc *bau_desc,
445 				unsigned long mmr_offset, int right_shift,
446 				struct bau_control *bcp, long try)
447 {
448 	unsigned long descriptor_status;
449 	cycles_t ttm;
450 	struct ptc_stats *stat = bcp->statp;
451 
452 	descriptor_status = uv1_read_status(mmr_offset, right_shift);
453 	/* spin on the status MMR, waiting for it to go idle */
454 	while ((descriptor_status != DS_IDLE)) {
455 		/*
456 		 * Our software ack messages may be blocked because
457 		 * there are no swack resources available.  As long
458 		 * as none of them has timed out hardware will NACK
459 		 * our message and its state will stay IDLE.
460 		 */
461 		if (descriptor_status == DS_SOURCE_TIMEOUT) {
462 			stat->s_stimeout++;
463 			return FLUSH_GIVEUP;
464 		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
465 			stat->s_dtimeout++;
466 			ttm = get_cycles();
467 
468 			/*
469 			 * Our retries may be blocked by all destination
470 			 * swack resources being consumed, and a timeout
471 			 * pending.  In that case hardware returns the
472 			 * ERROR that looks like a destination timeout.
473 			 */
474 			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
475 				bcp->conseccompletes = 0;
476 				return FLUSH_RETRY_PLUGGED;
477 			}
478 
479 			bcp->conseccompletes = 0;
480 			return FLUSH_RETRY_TIMEOUT;
481 		} else {
482 			/*
483 			 * descriptor_status is still BUSY
484 			 */
485 			cpu_relax();
486 		}
487 		descriptor_status = uv1_read_status(mmr_offset, right_shift);
488 	}
489 	bcp->conseccompletes++;
490 	return FLUSH_COMPLETE;
491 }
492 
493 /*
494  * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
495  */
496 static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
497 {
498 	unsigned long descriptor_status;
499 	unsigned long descriptor_status2;
500 
501 	descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
502 	descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
503 	descriptor_status = (descriptor_status << 1) | descriptor_status2;
504 	return descriptor_status;
505 }
506 
507 /*
508  * Return whether the status of the descriptor that is normally used for this
509  * cpu (the one indexed by its hub-relative cpu number) is busy.
510  * The status of the original 32 descriptors is always reflected in the 64
511  * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
512  * The bit provided by the activation_status_2 register is irrelevant to
513  * the status if it is only being tested for busy or not busy.
514  */
515 int normal_busy(struct bau_control *bcp)
516 {
517 	int cpu = bcp->uvhub_cpu;
518 	int mmr_offset;
519 	int right_shift;
520 
521 	mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
522 	right_shift = cpu * UV_ACT_STATUS_SIZE;
523 	return (((((read_lmmr(mmr_offset) >> right_shift) &
524 				UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
525 }
526 
527 /*
528  * Entered when a bau descriptor has gone into a permanent busy wait because
529  * of a hardware bug.
530  * Workaround the bug.
531  */
532 int handle_uv2_busy(struct bau_control *bcp)
533 {
534 	int busy_one = bcp->using_desc;
535 	int normal = bcp->uvhub_cpu;
536 	int selected = -1;
537 	int i;
538 	unsigned long descriptor_status;
539 	unsigned long status;
540 	int mmr_offset;
541 	struct bau_desc *bau_desc_old;
542 	struct bau_desc *bau_desc_new;
543 	struct bau_control *hmaster = bcp->uvhub_master;
544 	struct ptc_stats *stat = bcp->statp;
545 	cycles_t ttm;
546 
547 	stat->s_uv2_wars++;
548 	spin_lock(&hmaster->uvhub_lock);
549 	/* try for the original first */
550 	if (busy_one != normal) {
551 		if (!normal_busy(bcp))
552 			selected = normal;
553 	}
554 	if (selected < 0) {
555 		/* can't use the normal, select an alternate */
556 		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
557 		descriptor_status = read_lmmr(mmr_offset);
558 
559 		/* scan available descriptors 32-63 */
560 		for (i = 0; i < UV_CPUS_PER_AS; i++) {
561 			if ((hmaster->inuse_map & (1 << i)) == 0) {
562 				status = ((descriptor_status >>
563 						(i * UV_ACT_STATUS_SIZE)) &
564 						UV_ACT_STATUS_MASK) << 1;
565 				if (status != UV2H_DESC_BUSY) {
566 					selected = i + UV_CPUS_PER_AS;
567 					break;
568 				}
569 			}
570 		}
571 	}
572 
573 	if (busy_one != normal)
574 		/* mark the busy alternate as not in-use */
575 		hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
576 
577 	if (selected >= 0) {
578 		/* switch to the selected descriptor */
579 		if (selected != normal) {
580 			/* set the selected alternate as in-use */
581 			hmaster->inuse_map |=
582 					(1 << (selected - UV_CPUS_PER_AS));
583 			if (selected > stat->s_uv2_wars_hw)
584 				stat->s_uv2_wars_hw = selected;
585 		}
586 		bau_desc_old = bcp->descriptor_base;
587 		bau_desc_old += (ITEMS_PER_DESC * busy_one);
588 		bcp->using_desc = selected;
589 		bau_desc_new = bcp->descriptor_base;
590 		bau_desc_new += (ITEMS_PER_DESC * selected);
591 		*bau_desc_new = *bau_desc_old;
592 	} else {
593 		/*
594 		 * All are busy. Wait for the normal one for this cpu to
595 		 * free up.
596 		 */
597 		stat->s_uv2_war_waits++;
598 		spin_unlock(&hmaster->uvhub_lock);
599 		ttm = get_cycles();
600 		do {
601 			cpu_relax();
602 		} while (normal_busy(bcp));
603 		spin_lock(&hmaster->uvhub_lock);
604 		/* switch to the original descriptor */
605 		bcp->using_desc = normal;
606 		bau_desc_old = bcp->descriptor_base;
607 		bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
608 		bcp->using_desc = (ITEMS_PER_DESC * normal);
609 		bau_desc_new = bcp->descriptor_base;
610 		bau_desc_new += (ITEMS_PER_DESC * normal);
611 		*bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
612 	}
613 	spin_unlock(&hmaster->uvhub_lock);
614 	return FLUSH_RETRY_BUSYBUG;
615 }
616 
617 static int uv2_wait_completion(struct bau_desc *bau_desc,
618 				unsigned long mmr_offset, int right_shift,
619 				struct bau_control *bcp, long try)
620 {
621 	unsigned long descriptor_stat;
622 	cycles_t ttm;
623 	int desc = bcp->using_desc;
624 	long busy_reps = 0;
625 	struct ptc_stats *stat = bcp->statp;
626 
627 	descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
628 
629 	/* spin on the status MMR, waiting for it to go idle */
630 	while (descriptor_stat != UV2H_DESC_IDLE) {
631 		/*
632 		 * Our software ack messages may be blocked because
633 		 * there are no swack resources available.  As long
634 		 * as none of them has timed out hardware will NACK
635 		 * our message and its state will stay IDLE.
636 		 */
637 		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
638 		    (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
639 			stat->s_stimeout++;
640 			return FLUSH_GIVEUP;
641 		} else if (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) {
642 			stat->s_strongnacks++;
643 			bcp->conseccompletes = 0;
644 			return FLUSH_GIVEUP;
645 		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
646 			stat->s_dtimeout++;
647 			bcp->conseccompletes = 0;
648 			return FLUSH_RETRY_TIMEOUT;
649 		} else {
650 			busy_reps++;
651 			if (busy_reps > 1000000) {
652 				/* not to hammer on the clock */
653 				busy_reps = 0;
654 				ttm = get_cycles();
655 				if ((ttm - bcp->send_message) >
656 					(bcp->clocks_per_100_usec)) {
657 					return handle_uv2_busy(bcp);
658 				}
659 			}
660 			/*
661 			 * descriptor_stat is still BUSY
662 			 */
663 			cpu_relax();
664 		}
665 		descriptor_stat = uv2_read_status(mmr_offset, right_shift,
666 									desc);
667 	}
668 	bcp->conseccompletes++;
669 	return FLUSH_COMPLETE;
670 }
671 
672 /*
673  * There are 2 status registers; each an array[32] of 2 bits. Set up for
674  * which register to read and position in that register based on cpu in
675  * current hub.
676  */
677 static int wait_completion(struct bau_desc *bau_desc,
678 				struct bau_control *bcp, long try)
679 {
680 	int right_shift;
681 	unsigned long mmr_offset;
682 	int desc = bcp->using_desc;
683 
684 	if (desc < UV_CPUS_PER_AS) {
685 		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
686 		right_shift = desc * UV_ACT_STATUS_SIZE;
687 	} else {
688 		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
689 		right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
690 	}
691 
692 	if (bcp->uvhub_version == 1)
693 		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
694 								bcp, try);
695 	else
696 		return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
697 								bcp, try);
698 }
699 
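/*
 * Convert seconds to a cycle count using this cpu's cyc2ns scaling factor.
 */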
700 static inline cycles_t sec_2_cycles(unsigned long sec)
701 {
702 	unsigned long ns;
703 	cycles_t cyc;
704 
705 	ns = sec * 1000000000;
706 	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
707 	return cyc;
708 }
709 
710 /*
711  * Our retries are blocked by all destination sw ack resources being
712  * in use, and a timeout is pending. In that case hardware immediately
713  * returns the ERROR that looks like a destination timeout.
714  */
715 static void destination_plugged(struct bau_desc *bau_desc,
716 			struct bau_control *bcp,
717 			struct bau_control *hmaster, struct ptc_stats *stat)
718 {
719 	udelay(bcp->plugged_delay);
720 	bcp->plugged_tries++;
721 
722 	if (bcp->plugged_tries >= bcp->plugsb4reset) {
723 		bcp->plugged_tries = 0;
724 
725 		quiesce_local_uvhub(hmaster);
726 
727 		spin_lock(&hmaster->queue_lock);
728 		reset_with_ipi(&bau_desc->distribution, bcp);
729 		spin_unlock(&hmaster->queue_lock);
730 
731 		end_uvhub_quiesce(hmaster);
732 
733 		bcp->ipi_attempts++;
734 		stat->s_resets_plug++;
735 	}
736 }
737 
738 static void destination_timeout(struct bau_desc *bau_desc,
739 			struct bau_control *bcp, struct bau_control *hmaster,
740 			struct ptc_stats *stat)
741 {
742 	hmaster->max_concurr = 1;
743 	bcp->timeout_tries++;
744 	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
745 		bcp->timeout_tries = 0;
746 
747 		quiesce_local_uvhub(hmaster);
748 
749 		spin_lock(&hmaster->queue_lock);
750 		reset_with_ipi(&bau_desc->distribution, bcp);
751 		spin_unlock(&hmaster->queue_lock);
752 
753 		end_uvhub_quiesce(hmaster);
754 
755 		bcp->ipi_attempts++;
756 		stat->s_resets_timeout++;
757 	}
758 }
759 
760 /*
761  * Completions are taking a very long time due to a congested numalink
762  * network.
763  */
764 static void disable_for_congestion(struct bau_control *bcp,
765 					struct ptc_stats *stat)
766 {
767 	/* let only one cpu do this disabling */
768 	spin_lock(&disable_lock);
769 
770 	if (!baudisabled && bcp->period_requests &&
771 	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
772 		int tcpu;
773 		struct bau_control *tbcp;
774 		/* it becomes this cpu's job to turn on the use of the
775 		   BAU again */
776 		baudisabled = 1;
777 		bcp->set_bau_off = 1;
778 		bcp->set_bau_on_time = get_cycles();
779 		bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
780 		stat->s_bau_disabled++;
781 		for_each_present_cpu(tcpu) {
782 			tbcp = &per_cpu(bau_control, tcpu);
783 			tbcp->baudisabled = 1;
784 		}
785 	}
786 
787 	spin_unlock(&disable_lock);
788 }
789 
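/*
 * Reset the plug/timeout retry counts and, after enough consecutive
 * successful completions, raise this hub's allowed concurrency (up to
 * its configured maximum).
 */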
790 static void count_max_concurr(int stat, struct bau_control *bcp,
791 				struct bau_control *hmaster)
792 {
793 	bcp->plugged_tries = 0;
794 	bcp->timeout_tries = 0;
795 	if (stat != FLUSH_COMPLETE)
796 		return;
797 	if (bcp->conseccompletes <= bcp->complete_threshold)
798 		return;
799 	if (hmaster->max_concurr >= hmaster->max_concurr_const)
800 		return;
801 	hmaster->max_concurr++;
802 }
803 
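/*
 * Record the elapsed time of a completed send in the sender statistics
 * and in the congestion-detection counters; a send that appears
 * congested may disable use of the BAU.
 */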
804 static void record_send_stats(cycles_t time1, cycles_t time2,
805 		struct bau_control *bcp, struct ptc_stats *stat,
806 		int completion_status, int try)
807 {
808 	cycles_t elapsed;
809 
810 	if (time2 > time1) {
811 		elapsed = time2 - time1;
812 		stat->s_time += elapsed;
813 
814 		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
815 			bcp->period_requests++;
816 			bcp->period_time += elapsed;
817 			if ((elapsed > congested_cycles) &&
818 			    (bcp->period_requests > bcp->cong_reps))
819 				disable_for_congestion(bcp, stat);
820 		}
821 	} else
822 		stat->s_requestor--;
823 
824 	if (completion_status == FLUSH_COMPLETE && try > 1)
825 		stat->s_retriesok++;
826 	else if (completion_status == FLUSH_GIVEUP)
827 		stat->s_giveup++;
828 }
829 
830 /*
831  * Because of a uv1 hardware bug only a limited number of concurrent
832  * requests can be made.
833  */
834 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
835 {
836 	spinlock_t *lock = &hmaster->uvhub_lock;
837 	atomic_t *v;
838 
839 	v = &hmaster->active_descriptor_count;
840 	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
841 		stat->s_throttles++;
842 		do {
843 			cpu_relax();
844 		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
845 	}
846 }
847 
848 /*
849  * Handle the completion status of a message send.
850  */
851 static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
852 			struct bau_control *bcp, struct bau_control *hmaster,
853 			struct ptc_stats *stat)
854 {
855 	if (completion_status == FLUSH_RETRY_PLUGGED)
856 		destination_plugged(bau_desc, bcp, hmaster, stat);
857 	else if (completion_status == FLUSH_RETRY_TIMEOUT)
858 		destination_timeout(bau_desc, bcp, hmaster, stat);
859 }
860 
861 /*
862  * Send a broadcast and wait for it to complete.
863  *
864  * The flush_mask contains the cpus the broadcast is to be sent to including
865  * cpus that are on the local uvhub.
866  *
867  * Returns 0 if all flushing represented in the mask was done.
868  * Returns 1 if it gives up entirely and the original cpu mask is to be
869  * returned to the kernel.
870  */
871 int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
872 {
873 	int seq_number = 0;
874 	int completion_stat = 0;
875 	int uv1 = 0;
876 	long try = 0;
877 	unsigned long index;
878 	cycles_t time1;
879 	cycles_t time2;
880 	struct ptc_stats *stat = bcp->statp;
881 	struct bau_control *hmaster = bcp->uvhub_master;
882 	struct uv1_bau_msg_header *uv1_hdr = NULL;
883 	struct uv2_bau_msg_header *uv2_hdr = NULL;
884 	struct bau_desc *bau_desc;
885 
886 	if (bcp->uvhub_version == 1)
887 		uv1_throttle(hmaster, stat);
888 
889 	while (hmaster->uvhub_quiesce)
890 		cpu_relax();
891 
892 	time1 = get_cycles();
893 	do {
894 		bau_desc = bcp->descriptor_base;
895 		bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
896 		if (bcp->uvhub_version == 1) {
897 			uv1 = 1;
898 			uv1_hdr = &bau_desc->header.uv1_hdr;
899 		} else
900 			uv2_hdr = &bau_desc->header.uv2_hdr;
901 		if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
902 			if (uv1)
903 				uv1_hdr->msg_type = MSG_REGULAR;
904 			else
905 				uv2_hdr->msg_type = MSG_REGULAR;
906 			seq_number = bcp->message_number++;
907 		} else {
908 			if (uv1)
909 				uv1_hdr->msg_type = MSG_RETRY;
910 			else
911 				uv2_hdr->msg_type = MSG_RETRY;
912 			stat->s_retry_messages++;
913 		}
914 
915 		if (uv1)
916 			uv1_hdr->sequence = seq_number;
917 		else
918 			uv2_hdr->sequence = seq_number;
919 		index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
920 		bcp->send_message = get_cycles();
921 
922 		write_mmr_activation(index);
923 
924 		try++;
925 		completion_stat = wait_completion(bau_desc, bcp, try);
926 		/* UV2: wait_completion() may change the bcp->using_desc */
927 
928 		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
929 
930 		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
931 			bcp->ipi_attempts = 0;
932 			completion_stat = FLUSH_GIVEUP;
933 			break;
934 		}
935 		cpu_relax();
936 	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
937 		 (completion_stat == FLUSH_RETRY_BUSYBUG) ||
938 		 (completion_stat == FLUSH_RETRY_TIMEOUT));
939 
940 	time2 = get_cycles();
941 
942 	count_max_concurr(completion_stat, bcp, hmaster);
943 
944 	while (hmaster->uvhub_quiesce)
945 		cpu_relax();
946 
947 	atomic_dec(&hmaster->active_descriptor_count);
948 
949 	record_send_stats(time1, time2, bcp, stat, completion_stat, try);
950 
951 	if (completion_stat == FLUSH_GIVEUP)
952 		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
953 		return 1;
954 	return 0;
955 }
956 
957 /*
958  * The BAU is disabled. When the disabled time period has expired, the cpu
959  * that disabled it must re-enable it.
960  * Return 0 if it is re-enabled for all cpus.
961  */
962 static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
963 {
964 	int tcpu;
965 	struct bau_control *tbcp;
966 
967 	if (bcp->set_bau_off) {
968 		if (get_cycles() >= bcp->set_bau_on_time) {
969 			stat->s_bau_reenabled++;
970 			baudisabled = 0;
971 			for_each_present_cpu(tcpu) {
972 				tbcp = &per_cpu(bau_control, tcpu);
973 				tbcp->baudisabled = 0;
974 				tbcp->period_requests = 0;
975 				tbcp->period_time = 0;
976 			}
977 			return 0;
978 		}
979 	}
980 	return -1;
981 }
982 
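/*
 * Count the targets of one shootdown request: local and remote cpus,
 * and a histogram of how many uvhubs were addressed.
 */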
983 static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
984 				int remotes, struct bau_desc *bau_desc)
985 {
986 	stat->s_requestor++;
987 	stat->s_ntargcpu += remotes + locals;
988 	stat->s_ntargremotes += remotes;
989 	stat->s_ntarglocals += locals;
990 
991 	/* uvhub statistics */
992 	hubs = bau_uvhub_weight(&bau_desc->distribution);
993 	if (locals) {
994 		stat->s_ntarglocaluvhub++;
995 		stat->s_ntargremoteuvhub += (hubs - 1);
996 	} else
997 		stat->s_ntargremoteuvhub += hubs;
998 
999 	stat->s_ntarguvhub += hubs;
1000 
1001 	if (hubs >= 16)
1002 		stat->s_ntarguvhub16++;
1003 	else if (hubs >= 8)
1004 		stat->s_ntarguvhub8++;
1005 	else if (hubs >= 4)
1006 		stat->s_ntarguvhub4++;
1007 	else if (hubs >= 2)
1008 		stat->s_ntarguvhub2++;
1009 	else
1010 		stat->s_ntarguvhub1++;
1011 }
1012 
1013 /*
1014  * Translate a cpu mask to the uvhub distribution mask in the BAU
1015  * activation descriptor.
1016  */
1017 static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
1018 			struct bau_desc *bau_desc, int *localsp, int *remotesp)
1019 {
1020 	int cpu;
1021 	int pnode;
1022 	int cnt = 0;
1023 	struct hub_and_pnode *hpp;
1024 
1025 	for_each_cpu(cpu, flush_mask) {
1026 		/*
1027 		 * The distribution vector is a bit map of pnodes, relative
1028 		 * to the partition base pnode (and the partition base nasid
1029 		 * in the header).
1030 		 * Translate cpu to pnode and hub using a local memory array.
1031 		 */
1032 		hpp = &bcp->socket_master->thp[cpu];
1033 		pnode = hpp->pnode - bcp->partition_base_pnode;
1034 		bau_uvhub_set(pnode, &bau_desc->distribution);
1035 		cnt++;
1036 		if (hpp->uvhub == bcp->uvhub)
1037 			(*localsp)++;
1038 		else
1039 			(*remotesp)++;
1040 	}
1041 	if (!cnt)
1042 		return 1;
1043 	return 0;
1044 }
1045 
1046 /*
1047  * globally purge translation cache of a virtual address or all TLB's
1048  * @cpumask: mask of all cpu's in which the address is to be removed
1049  * @mm: mm_struct containing virtual address range
1050  * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
1051  * @cpu: the current cpu
1052  *
1053  * This is the entry point for initiating any UV global TLB shootdown.
1054  *
1055  * Purges the translation caches of all specified processors of the given
1056  * virtual address, or purges all TLB's on specified processors.
1057  *
1058  * The caller has derived the cpumask from the mm_struct.  This function
1059  * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
1060  *
1061  * The cpumask is converted into a uvhubmask of the uvhubs containing
1062  * those cpus.
1063  *
1064  * Note that this function should be called with preemption disabled.
1065  *
1066  * Returns NULL if all remote flushing was done.
1067  * Returns pointer to cpumask if some remote flushing remains to be
1068  * done.  The returned pointer is valid till preemption is re-enabled.
1069  */
1070 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
1071 				struct mm_struct *mm, unsigned long va,
1072 				unsigned int cpu)
1073 {
1074 	int locals = 0;
1075 	int remotes = 0;
1076 	int hubs = 0;
1077 	struct bau_desc *bau_desc;
1078 	struct cpumask *flush_mask;
1079 	struct ptc_stats *stat;
1080 	struct bau_control *bcp;
1081 
1082 	/* kernel was booted 'nobau' */
1083 	if (nobau)
1084 		return cpumask;
1085 
1086 	bcp = &per_cpu(bau_control, cpu);
1087 	stat = bcp->statp;
1088 
1089 	/* bau was disabled due to slow response */
1090 	if (bcp->baudisabled) {
1091 		if (check_enable(bcp, stat))
1092 			return cpumask;
1093 	}
1094 
1095 	/*
1096 	 * Each sending cpu has a per-cpu mask which it fills from the caller's
1097 	 * cpu mask.  All cpus are converted to uvhubs and copied to the
1098 	 * activation descriptor.
1099 	 */
1100 	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
1101 	/* don't actually do a shootdown of the local cpu */
1102 	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
1103 
1104 	if (cpu_isset(cpu, *cpumask))
1105 		stat->s_ntargself++;
1106 
1107 	bau_desc = bcp->descriptor_base;
1108 	bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
1109 	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
1110 	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
1111 		return NULL;
1112 
1113 	record_send_statistics(stat, locals, hubs, remotes, bau_desc);
1114 
1115 	bau_desc->payload.address = va;
1116 	bau_desc->payload.sending_cpu = cpu;
1117 	/*
1118 	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
1119 	 * or 1 if it gave up and the original cpumask should be returned.
1120 	 */
1121 	if (!uv_flush_send_and_wait(flush_mask, bcp))
1122 		return NULL;
1123 	else
1124 		return cpumask;
1125 }
1126 
1127 /*
1128  * Search the message queue for any 'other' message with the same software
1129  * acknowledge resource bit vector.
1130  */
1131 struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
1132 			struct bau_control *bcp, unsigned char swack_vec)
1133 {
1134 	struct bau_pq_entry *msg_next = msg + 1;
1135 
1136 	if (msg_next > bcp->queue_last)
1137 		msg_next = bcp->queue_first;
1138 	while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
1139 		if (msg_next->swack_vec == swack_vec)
1140 			return msg_next;
1141 		msg_next++;
1142 		if (msg_next > bcp->queue_last)
1143 			msg_next = bcp->queue_first;
1144 	}
1145 	return NULL;
1146 }
1147 
1148 /*
1149  * UV2 needs to work around a bug in which an arriving message has not
1150  * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
1151  * Such a message must be ignored.
1152  */
1153 void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
1154 {
1155 	unsigned long mmr_image;
1156 	unsigned char swack_vec;
1157 	struct bau_pq_entry *msg = mdp->msg;
1158 	struct bau_pq_entry *other_msg;
1159 
1160 	mmr_image = read_mmr_sw_ack();
1161 	swack_vec = msg->swack_vec;
1162 
1163 	if ((swack_vec & mmr_image) == 0) {
1164 		/*
1165 		 * This message was assigned a swack resource, but no
1166 		 * reserved acknowledgment is pending.
1167 		 * The bug has prevented this message from setting the MMR.
1168 		 * And no other message has used the same sw_ack resource.
1169 		 * Do the requested shootdown but do not reply to the msg.
1170 		 * (the 0 means make no acknowledge)
1171 		 */
1172 		bau_process_message(mdp, bcp, 0);
1173 		return;
1174 	}
1175 
1176 	/*
1177 	 * Some message has set the MMR 'pending' bit; it might have been
1178 	 * another message.  Look for that message.
1179 	 */
1180 	other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
1181 	if (other_msg) {
1182 		/* There is another.  Do not ack the current one. */
1183 		bau_process_message(mdp, bcp, 0);
1184 		/*
1185 		 * Let the natural processing of that message acknowledge
1186 		 * it. Don't get the processing of sw_ack's out of order.
1187 		 */
1188 		return;
1189 	}
1190 
1191 	/*
1192 	 * There is no other message using this sw_ack, so it is safe to
1193 	 * acknowledge it.
1194 	 */
1195 	bau_process_message(mdp, bcp, 1);
1196 
1197 	return;
1198 }
1199 
1200 /*
1201  * The BAU message interrupt comes here. (registered by set_intr_gate)
1202  * See entry_64.S
1203  *
1204  * We received a broadcast assist message.
1205  *
1206  * Interrupts are disabled; this interrupt could represent
1207  * the receipt of several messages.
1208  *
1209  * All cores/threads on this hub get this interrupt.
1210  * The last one to see it does the software ack.
1211  * (the resource will not be freed until noninterruptible cpus see this
1212  *  interrupt; hardware may timeout the s/w ack and reply ERROR)
1213  */
1214 void uv_bau_message_interrupt(struct pt_regs *regs)
1215 {
1216 	int count = 0;
1217 	cycles_t time_start;
1218 	struct bau_pq_entry *msg;
1219 	struct bau_control *bcp;
1220 	struct ptc_stats *stat;
1221 	struct msg_desc msgdesc;
1222 
1223 	ack_APIC_irq();
1224 	time_start = get_cycles();
1225 
1226 	bcp = &per_cpu(bau_control, smp_processor_id());
1227 	stat = bcp->statp;
1228 
1229 	msgdesc.queue_first = bcp->queue_first;
1230 	msgdesc.queue_last = bcp->queue_last;
1231 
1232 	msg = bcp->bau_msg_head;
1233 	while (msg->swack_vec) {
1234 		count++;
1235 
1236 		msgdesc.msg_slot = msg - msgdesc.queue_first;
1237 		msgdesc.msg = msg;
1238 		if (bcp->uvhub_version == 2)
1239 			process_uv2_message(&msgdesc, bcp);
1240 		else
1241 			bau_process_message(&msgdesc, bcp, 1);
1242 
1243 		msg++;
1244 		if (msg > msgdesc.queue_last)
1245 			msg = msgdesc.queue_first;
1246 		bcp->bau_msg_head = msg;
1247 	}
1248 	stat->d_time += (get_cycles() - time_start);
1249 	if (!count)
1250 		stat->d_nomsg++;
1251 	else if (count > 1)
1252 		stat->d_multmsg++;
1253 }
1254 
1255 /*
1256  * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
1257  * shootdown message timeouts enabled.  The timeout does not cause
1258  * an interrupt, but causes an error message to be returned to
1259  * the sender.
1260  */
1261 static void __init enable_timeouts(void)
1262 {
1263 	int uvhub;
1264 	int nuvhubs;
1265 	int pnode;
1266 	unsigned long mmr_image;
1267 
1268 	nuvhubs = uv_num_possible_blades();
1269 
1270 	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1271 		if (!uv_blade_nr_possible_cpus(uvhub))
1272 			continue;
1273 
1274 		pnode = uv_blade_to_pnode(uvhub);
1275 		mmr_image = read_mmr_misc_control(pnode);
1276 		/*
1277 		 * Set the timeout period and then lock it in, in three
1278 		 * steps; captures and locks in the period.
1279 		 *
1280 		 * To program the period, the SOFT_ACK_MODE must be off.
1281 		 */
1282 		mmr_image &= ~(1L << SOFTACK_MSHIFT);
1283 		write_mmr_misc_control(pnode, mmr_image);
1284 		/*
1285 		 * Set the 4-bit period.
1286 		 */
1287 		mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
1288 		mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
1289 		write_mmr_misc_control(pnode, mmr_image);
1290 		/*
1291 		 * UV1:
1292 		 * Subsequent reversals of the timebase bit (3) cause an
1293 		 * immediate timeout of one or all INTD resources as
1294 		 * indicated in bits 2:0 (7 causes all of them to timeout).
1295 		 */
1296 		mmr_image |= (1L << SOFTACK_MSHIFT);
1297 		if (is_uv2_hub()) {
1298 			mmr_image &= ~(1L << UV2_LEG_SHFT);
1299 			mmr_image |= (1L << UV2_EXT_SHFT);
1300 		}
1301 		write_mmr_misc_control(pnode, mmr_image);
1302 	}
1303 }
1304 
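/*
 * seq_file iterators for /proc/sgi_uv/ptc_statistics; the iterator
 * value is the cpu number.
 */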
1305 static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
1306 {
1307 	if (*offset < num_possible_cpus())
1308 		return offset;
1309 	return NULL;
1310 }
1311 
1312 static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
1313 {
1314 	(*offset)++;
1315 	if (*offset < num_possible_cpus())
1316 		return offset;
1317 	return NULL;
1318 }
1319 
1320 static void ptc_seq_stop(struct seq_file *file, void *data)
1321 {
1322 }
1323 
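/*
 * Convert microseconds to a cycle count using this cpu's cyc2ns
 * scaling factor.
 */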
1324 static inline unsigned long long usec_2_cycles(unsigned long microsec)
1325 {
1326 	unsigned long ns;
1327 	unsigned long long cyc;
1328 
1329 	ns = microsec * 1000;
1330 	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
1331 	return cyc;
1332 }
1333 
1334 /*
1335  * Display the statistics thru /proc/sgi_uv/ptc_statistics
1336  * 'data' points to the cpu number
1337  * Note: see the descriptions in stat_description[].
1338  */
1339 static int ptc_seq_show(struct seq_file *file, void *data)
1340 {
1341 	struct ptc_stats *stat;
1342 	int cpu;
1343 
1344 	cpu = *(loff_t *)data;
1345 	if (!cpu) {
1346 		seq_printf(file,
1347 			"# cpu sent stime self locals remotes ncpus localhub ");
1348 		seq_printf(file,
1349 			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1350 		seq_printf(file,
1351 		    "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries rok ");
1352 		seq_printf(file,
1353 			"resetp resett giveup sto bz throt swack recv rtime ");
1354 		seq_printf(file,
1355 			"all one mult none retry canc nocan reset rcan ");
1356 		seq_printf(file,
1357 			"disable enable wars warshw warwaits\n");
1358 	}
1359 	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
1360 		stat = &per_cpu(ptcstats, cpu);
1361 		/* source side statistics */
1362 		seq_printf(file,
1363 			"cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1364 			   cpu, stat->s_requestor, cycles_2_us(stat->s_time),
1365 			   stat->s_ntargself, stat->s_ntarglocals,
1366 			   stat->s_ntargremotes, stat->s_ntargcpu,
1367 			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1368 			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
1369 		seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
1370 			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1371 			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
1372 			   stat->s_dtimeout, stat->s_strongnacks);
1373 		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
1374 			   stat->s_retry_messages, stat->s_retriesok,
1375 			   stat->s_resets_plug, stat->s_resets_timeout,
1376 			   stat->s_giveup, stat->s_stimeout,
1377 			   stat->s_busy, stat->s_throttles);
1378 
1379 		/* destination side statistics */
1380 		seq_printf(file,
1381 			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1382 			   read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
1383 			   stat->d_requestee, cycles_2_us(stat->d_time),
1384 			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
1385 			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
1386 			   stat->d_nocanceled, stat->d_resets,
1387 			   stat->d_rcanceled);
1388 		seq_printf(file, "%ld %ld %ld %ld %ld\n",
1389 			stat->s_bau_disabled, stat->s_bau_reenabled,
1390 			stat->s_uv2_wars, stat->s_uv2_wars_hw,
1391 			stat->s_uv2_war_waits);
1392 	}
1393 	return 0;
1394 }
1395 
1396 /*
1397  * Display the tunables thru debugfs
1398  */
1399 static ssize_t tunables_read(struct file *file, char __user *userbuf,
1400 				size_t count, loff_t *ppos)
1401 {
1402 	char *buf;
1403 	int ret;
1404 
1405 	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
1406 		"max_concur plugged_delay plugsb4reset",
1407 		"timeoutsb4reset ipi_reset_limit complete_threshold",
1408 		"congested_response_us congested_reps congested_period",
1409 		max_concurr, plugged_delay, plugsb4reset,
1410 		timeoutsb4reset, ipi_reset_limit, complete_threshold,
1411 		congested_respns_us, congested_reps, congested_period);
1412 
1413 	if (!buf)
1414 		return -ENOMEM;
1415 
1416 	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
1417 	kfree(buf);
1418 	return ret;
1419 }
1420 
1421 /*
1422  * handle a write to /proc/sgi_uv/ptc_statistics
1423  * -1: reset the statistics
1424  *  0: display meaning of the statistics
1425  */
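/*
 * For example, 'echo -1 > /proc/sgi_uv/ptc_statistics' clears every
 * cpu's counters, and 'echo 0' prints the meaning of each statistic
 * to the kernel log.
 */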
1426 static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1427 				size_t count, loff_t *data)
1428 {
1429 	int cpu;
1430 	int i;
1431 	int elements;
1432 	long input_arg;
1433 	char optstr[64];
1434 	struct ptc_stats *stat;
1435 
1436 	if (count == 0 || count > sizeof(optstr))
1437 		return -EINVAL;
1438 	if (copy_from_user(optstr, user, count))
1439 		return -EFAULT;
1440 	optstr[count - 1] = '\0';
1441 
1442 	if (strict_strtol(optstr, 10, &input_arg) < 0) {
1443 		printk(KERN_DEBUG "%s is invalid\n", optstr);
1444 		return -EINVAL;
1445 	}
1446 
1447 	if (input_arg == 0) {
1448 		elements = sizeof(stat_description)/sizeof(*stat_description);
1449 		printk(KERN_DEBUG "# cpu:      cpu number\n");
1450 		printk(KERN_DEBUG "Sender statistics:\n");
1451 		for (i = 0; i < elements; i++)
1452 			printk(KERN_DEBUG "%s\n", stat_description[i]);
1453 	} else if (input_arg == -1) {
1454 		for_each_present_cpu(cpu) {
1455 			stat = &per_cpu(ptcstats, cpu);
1456 			memset(stat, 0, sizeof(struct ptc_stats));
1457 		}
1458 	}
1459 
1460 	return count;
1461 }
1462 
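/*
 * Minimal decimal string-to-integer conversion for the tunables parser;
 * conversion stops at the first non-digit character.
 */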
1463 static int local_atoi(const char *name)
1464 {
1465 	int val = 0;
1466 
1467 	for (;; name++) {
1468 		switch (*name) {
1469 		case '0' ... '9':
1470 			val = 10*val+(*name-'0');
1471 			break;
1472 		default:
1473 			return val;
1474 		}
1475 	}
1476 }
1477 
1478 /*
1479  * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
1480  * Zero values reset them to defaults.
1481  */
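/*
 * For example (values illustrative), writing nine zeros:
 *   echo "0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 * restores every tunable to its default.
 */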
1482 static int parse_tunables_write(struct bau_control *bcp, char *instr,
1483 				int count)
1484 {
1485 	char *p;
1486 	char *q;
1487 	int cnt = 0;
1488 	int val;
1489 	int e = sizeof(tunables) / sizeof(*tunables);
1490 
1491 	p = instr + strspn(instr, WHITESPACE);
1492 	q = p;
1493 	for (; *p; p = q + strspn(q, WHITESPACE)) {
1494 		q = p + strcspn(p, WHITESPACE);
1495 		cnt++;
1496 		if (q == p)
1497 			break;
1498 	}
1499 	if (cnt != e) {
1500 		printk(KERN_INFO "bau tunable error: should be %d values\n", e);
1501 		return -EINVAL;
1502 	}
1503 
1504 	p = instr + strspn(instr, WHITESPACE);
1505 	q = p;
1506 	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1507 		q = p + strcspn(p, WHITESPACE);
1508 		val = local_atoi(p);
1509 		switch (cnt) {
1510 		case 0:
1511 			if (val == 0) {
1512 				max_concurr = MAX_BAU_CONCURRENT;
1513 				max_concurr_const = MAX_BAU_CONCURRENT;
1514 				continue;
1515 			}
1516 			if (val < 1 || val > bcp->cpus_in_uvhub) {
1517 				printk(KERN_DEBUG
1518 				"Error: BAU max concurrent %d is invalid\n",
1519 				val);
1520 				return -EINVAL;
1521 			}
1522 			max_concurr = val;
1523 			max_concurr_const = val;
1524 			continue;
1525 		default:
1526 			if (val == 0)
1527 				*tunables[cnt].tunp = tunables[cnt].deflt;
1528 			else
1529 				*tunables[cnt].tunp = val;
1530 			continue;
1531 		}
1532 		if (q == p)
1533 			break;
1534 	}
1535 	return 0;
1536 }
1537 
1538 /*
1539  * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
1540  */
1541 static ssize_t tunables_write(struct file *file, const char __user *user,
1542 				size_t count, loff_t *data)
1543 {
1544 	int cpu;
1545 	int ret;
1546 	char instr[100];
1547 	struct bau_control *bcp;
1548 
1549 	if (count == 0 || count > sizeof(instr)-1)
1550 		return -EINVAL;
1551 	if (copy_from_user(instr, user, count))
1552 		return -EFAULT;
1553 
1554 	instr[count] = '\0';
1555 
1556 	cpu = get_cpu();
1557 	bcp = &per_cpu(bau_control, cpu);
1558 	ret = parse_tunables_write(bcp, instr, count);
1559 	put_cpu();
1560 	if (ret)
1561 		return ret;
1562 
1563 	for_each_present_cpu(cpu) {
1564 		bcp = &per_cpu(bau_control, cpu);
1565 		bcp->max_concurr =		max_concurr;
1566 		bcp->max_concurr_const =	max_concurr;
1567 		bcp->plugged_delay =		plugged_delay;
1568 		bcp->plugsb4reset =		plugsb4reset;
1569 		bcp->timeoutsb4reset =		timeoutsb4reset;
1570 		bcp->ipi_reset_limit =		ipi_reset_limit;
1571 		bcp->complete_threshold =	complete_threshold;
1572 		bcp->cong_response_us =		congested_respns_us;
1573 		bcp->cong_reps =		congested_reps;
1574 		bcp->cong_period =		congested_period;
1575 	}
1576 	return count;
1577 }
1578 
1579 static const struct seq_operations uv_ptc_seq_ops = {
1580 	.start		= ptc_seq_start,
1581 	.next		= ptc_seq_next,
1582 	.stop		= ptc_seq_stop,
1583 	.show		= ptc_seq_show
1584 };
1585 
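/*
 * open of /proc/sgi_uv/ptc_statistics: attach the seq_file iterator.
 */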
1586 static int ptc_proc_open(struct inode *inode, struct file *file)
1587 {
1588 	return seq_open(file, &uv_ptc_seq_ops);
1589 }
1590 
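/*
 * open of the debugfs bau_tunables file; no per-open state is needed.
 */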
1591 static int tunables_open(struct inode *inode, struct file *file)
1592 {
1593 	return 0;
1594 }
1595 
1596 static const struct file_operations proc_uv_ptc_operations = {
1597 	.open		= ptc_proc_open,
1598 	.read		= seq_read,
1599 	.write		= ptc_proc_write,
1600 	.llseek		= seq_lseek,
1601 	.release	= seq_release,
1602 };
1603 
1604 static const struct file_operations tunables_fops = {
1605 	.open		= tunables_open,
1606 	.read		= tunables_read,
1607 	.write		= tunables_write,
1608 	.llseek		= default_llseek,
1609 };
1610 
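/*
 * Create /proc/sgi_uv/ptc_statistics and the debugfs bau_tunables
 * file. Only done on UV systems.
 */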
1611 static int __init uv_ptc_init(void)
1612 {
1613 	struct proc_dir_entry *proc_uv_ptc;
1614 
1615 	if (!is_uv_system())
1616 		return 0;
1617 
1618 	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1619 				  &proc_uv_ptc_operations);
1620 	if (!proc_uv_ptc) {
1621 		printk(KERN_ERR "unable to create %s proc entry\n",
1622 		       UV_PTC_BASENAME);
1623 		return -EINVAL;
1624 	}
1625 
1626 	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1627 	if (!tunables_dir) {
1628 		printk(KERN_ERR "unable to create debugfs directory %s\n",
1629 		       UV_BAU_TUNABLES_DIR);
1630 		return -EINVAL;
1631 	}
1632 	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
1633 					tunables_dir, NULL, &tunables_fops);
1634 	if (!tunables_file) {
1635 		printk(KERN_ERR "unable to create debugfs file %s\n",
1636 		       UV_BAU_TUNABLES_FILE);
1637 		return -EINVAL;
1638 	}
1639 	return 0;
1640 }
1641 
1642 /*
1643  * Initialize the sending side's sending buffers.
1644  */
1645 static void activation_descriptor_init(int node, int pnode, int base_pnode)
1646 {
1647 	int i;
1648 	int cpu;
1649 	int uv1 = 0;
1650 	unsigned long gpa;
1651 	unsigned long m;
1652 	unsigned long n;
1653 	size_t dsize;
1654 	struct bau_desc *bau_desc;
1655 	struct bau_desc *bd2;
1656 	struct uv1_bau_msg_header *uv1_hdr;
1657 	struct uv2_bau_msg_header *uv2_hdr;
1658 	struct bau_control *bcp;
1659 
1660 	/*
1661 	 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
1662 	 * per cpu; and one per cpu on the uvhub (ADP_SZ)
1663 	 */
1664 	dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
1665 	bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
1666 	BUG_ON(!bau_desc);
1667 
	gpa = uv_gpa(bau_desc);
	n = uv_gpa_to_gnode(gpa);
	m = uv_gpa_to_offset(gpa);
	if (is_uv1_hub())
		uv1 = 1;

	/* the 14-bit pnode */
	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
	/*
	 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		if (uv1) {
			uv1_hdr = &bd2->header.uv1_hdr;
			uv1_hdr->swack_flag =	1;
			/*
			 * The base_dest_nasid set in the message header
			 * is the nasid of the first uvhub in the partition.
			 * The bit map will indicate destination pnode numbers
			 * relative to that base. They may not be consecutive
			 * if nasid striding is being used.
			 */
			uv1_hdr->base_dest_nasid =
						UV_PNODE_TO_NASID(base_pnode);
			uv1_hdr->dest_subnodeid =	UV_LB_SUBNODEID;
			uv1_hdr->command =		UV_NET_ENDPOINT_INTD;
			uv1_hdr->int_both =		1;
			/*
			 * all others need to be set to zero:
			 *   fairness chaining multilevel count replied_to
			 */
		} else {
			uv2_hdr = &bd2->header.uv2_hdr;
			uv2_hdr->swack_flag =	1;
			uv2_hdr->base_dest_nasid =
						UV_PNODE_TO_NASID(base_pnode);
			uv2_hdr->dest_subnodeid =	UV_LB_SUBNODEID;
			uv2_hdr->command =		UV_NET_ENDPOINT_INTD;
		}
	}
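	/* every cpu on this uvhub uses the same block of descriptors */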
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}

/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void pq_init(int node, int pnode)
{
	int cpu;
	size_t plsize;
	char *cp;
	void *vp;
	unsigned long pn;
	unsigned long first;
	unsigned long pn_first;
	unsigned long last;
	struct bau_pq_entry *pqp;
	struct bau_control *bcp;

	plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
	vp = kmalloc_node(plsize, GFP_KERNEL, node);
	pqp = (struct bau_pq_entry *)vp;
	BUG_ON(!pqp);

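	/*
	 * Round the queue start up to the next 32-byte boundary; the extra
	 * entry allocated above provides the slack for this adjustment.
	 */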
	cp = (char *)pqp + 31;
	pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->queue_first	= pqp;
		bcp->bau_msg_head	= pqp;
		bcp->queue_last		= pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the gnode of where the memory was really allocated
	 */
	pn = uv_gpa_to_gnode(uv_gpa(pqp));
	first = uv_physnodeaddr(pqp);
	pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
	last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
	write_mmr_payload_first(pnode, pn_first);
	write_mmr_payload_tail(pnode, first);
	write_mmr_payload_last(pnode, last);
	write_gmmr_sw_ack(pnode, 0xffffUL);

	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
}

/*
 * Initialization of each UV hub's structures
 */
static void __init init_uvhub(int uvhub, int vector, int base_pnode)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);

	activation_descriptor_init(node, pnode, base_pnode);

	pq_init(node, pnode);
	/*
	 * The initialization below can't be done in firmware because the
	 * messaging IRQ is determined by the OS.
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
	write_mmr_data_config(pnode, ((apicid << 32) | vector));
}

/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

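	/*
	 * UV1: timeout (usec) = base ns (from the prescale index) * softack
	 * multiplier * transaction-timeout multiplier, divided by 1000.
	 */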
	if (is_uv1_hub()) {
		mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
		mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
		mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
		base = timeout_base_ns[index];
		ts_ns = base * mult1 * mult2;
		ret = ts_ns / 1000;
	} else {
		/* 4-bit field: 1 bit selects a 10us or 80us base, 3 bits of multiplier */
		mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
			base = 80;
		else
			base = 10;
		mult1 = mmr_image & UV2_ACK_MASK;
		ret = mult1 * base;
	}
	return ret;
}

static void __init init_per_cpu_tunables(void)
{
	int cpu;
	struct bau_control *bcp;

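	/*
	 * Seed every cpu's bau_control with the current global tunables;
	 * later writes to the debugfs tunables file update these per-cpu
	 * copies in a similar way.
	 */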
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled		= 0;
		bcp->statp			= &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval		= usec_2_cycles(2*timeout_us);
		bcp->max_concurr		= max_concurr;
		bcp->max_concurr_const		= max_concurr;
		bcp->plugged_delay		= plugged_delay;
		bcp->plugsb4reset		= plugsb4reset;
		bcp->timeoutsb4reset		= timeoutsb4reset;
		bcp->ipi_reset_limit		= ipi_reset_limit;
		bcp->complete_threshold		= complete_threshold;
		bcp->cong_response_us		= congested_respns_us;
		bcp->cong_reps			= congested_reps;
		bcp->cong_period		= congested_period;
		bcp->clocks_per_100_usec =	usec_2_cycles(100);
		spin_lock_init(&bcp->queue_lock);
		spin_lock_init(&bcp->uvhub_lock);
	}
}

/*
 * Scan all cpus to collect blade and socket summaries.
 */
static int __init get_cpu_topology(int base_pnode,
					struct uvhub_desc *uvhub_descs,
					unsigned char *uvhub_mask)
{
	int cpu;
	int pnode;
	int uvhub;
	int socket;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);

		memset(bcp, 0, sizeof(struct bau_control));

		pnode = uv_cpu_hub_info(cpu)->pnode;
		if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
			printk(KERN_EMERG
				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
				cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
			return 1;
		}

		bcp->osnode = cpu_to_node(cpu);
		bcp->partition_base_pnode = base_pnode;

		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
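		/* mark this uvhub as present in the bitmask */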
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];

		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;

		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = bcp->osnode & 1;
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			printk(KERN_EMERG "%d cpus per socket invalid\n",
				sdp->num_cpus);
			return 1;
		}
	}
	return 0;
}

/*
 * Each socket is to get a local array of pnodes/hubs.
 */
static void make_per_cpu_thp(struct bau_control *smaster)
{
	int cpu;
	size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();

	smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
	memset(smaster->thp, 0, hpsz);
	for_each_present_cpu(cpu) {
		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
	}
}

/*
 * Each uvhub is to get a local cpumask.
 */
static void make_per_hub_cpumask(struct bau_control *hmaster)
{
	int sz = sizeof(cpumask_t);

	hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
}

/*
 * Initialize all the per_cpu information for the cpus on a given socket,
 * given what has been gathered into the socket_desc struct,
 * and report the chosen hub and socket masters back to the caller.
 */
static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
			struct bau_control **smasterp,
			struct bau_control **hmasterp)
{
	int i;
	int cpu;
	struct bau_control *bcp;

	for (i = 0; i < sdp->num_cpus; i++) {
		cpu = sdp->cpu_number[i];
		bcp = &per_cpu(bau_control, cpu);
		bcp->cpu = cpu;
		if (i == 0) {
			*smasterp = bcp;
			if (!(*hmasterp))
				*hmasterp = bcp;
		}
		bcp->cpus_in_uvhub = bdp->num_cpus;
		bcp->cpus_in_socket = sdp->num_cpus;
		bcp->socket_master = *smasterp;
		bcp->uvhub = bdp->uvhub;
		if (is_uv1_hub())
			bcp->uvhub_version = 1;
		else if (is_uv2_hub())
			bcp->uvhub_version = 2;
		else {
			printk(KERN_EMERG "uvhub version not 1 or 2\n");
			return 1;
		}
		bcp->uvhub_master = *hmasterp;
		bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
		bcp->using_desc = bcp->uvhub_cpu;
		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
			printk(KERN_EMERG "%d cpus per uvhub invalid\n",
				bcp->uvhub_cpu);
			return 1;
		}
	}
	return 0;
}

/*
 * Summarize the blade and socket topology into the per_cpu structures.
 */
static int __init summarize_uvhub_sockets(int nuvhubs,
			struct uvhub_desc *uvhub_descs,
			unsigned char *uvhub_mask)
{
	int socket;
	int uvhub;
	unsigned short socket_mask;

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		struct uvhub_desc *bdp;
		struct bau_control *smaster = NULL;
		struct bau_control *hmaster = NULL;

		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;

		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
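		/* walk the socket mask, one bit per populated socket on this hub */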
		while (socket_mask) {
			struct socket_desc *sdp;
			if ((socket_mask & 1)) {
				sdp = &bdp->socket[socket];
				if (scan_sock(sdp, bdp, &smaster, &hmaster))
					return 1;
				make_per_cpu_thp(smaster);
			}
			socket++;
			socket_mask = (socket_mask >> 1);
		}
		make_per_hub_cpumask(hmaster);
	}
	return 0;
}

/*
 * initialize the bau_control structure for each cpu
 */
static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
{
	unsigned char *uvhub_mask;
	void *vp;
	struct uvhub_desc *uvhub_descs;

	timeout_us = calculate_destination_timeout();

	vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	uvhub_descs = (struct uvhub_desc *)vp;
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);

	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
		goto fail;

	if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
		goto fail;

	kfree(uvhub_descs);
	kfree(uvhub_mask);
	init_per_cpu_tunables();
	return 0;

fail:
	kfree(uvhub_descs);
	kfree(uvhub_mask);
	return 1;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int cpus;
	int vector;
	cpumask_var_t *mask;

	if (!is_uv_system())
		return 0;

	if (nobau)
		return 0;

	for_each_possible_cpu(cur_cpu) {
		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
		zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
	}

	nuvhubs = uv_num_possible_blades();
	spin_lock_init(&disable_lock);
	congested_cycles = usec_2_cycles(congested_respns_us);

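	/* the partition base pnode is the lowest pnode among blades with cpus */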
	uv_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		cpus = uv_blade_nr_possible_cpus(uvhub);
		if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
			uv_base_pnode = uv_blade_to_pnode(uvhub);
	}

	enable_timeouts();

	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
		nobau = 1;
		return 0;
	}

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			init_uvhub(uvhub, vector, uv_base_pnode);

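	/* install the interrupt handler that receives BAU shootdown messages */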
	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			unsigned long val;
			unsigned long mmr;
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			val = 1L << 63;
			write_gmmr_activation(pnode, val);
			mmr = 1; /* should be 1 to broadcast to both sockets */
			if (!is_uv1_hub())
				write_mmr_data_broadcast(pnode, mmr);
		}
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);