xref: /linux/arch/powerpc/platforms/pseries/dtl.c (revision c771600c6af14749609b49565ffb4cac2959710d)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <asm/firmware.h>
#include <asm/dtl.h>
#include <asm/lppaca.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

#ifdef CONFIG_DTL
struct dtl {
	struct dtl_entry	*buf;
	int			cpu;
	int			buf_entries;
	u64			last_idx;
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

static u8 dtl_event_mask = DTL_LOG_ALL;
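/*
 * dtl_event_mask selects which event classes the hypervisor should log.
 * DTL_LOG_ALL (asm/lppaca.h) requests all of them (cede, preempt and
 * fault); the mask can be changed at run time through the dtl_event_mask
 * debugfs file created in dtl_init() below.
 */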
/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static int dtl_buf_entries = N_DISPATCH_LOG;
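/*
 * N_DISPATCH_LOG is defined in asm/lppaca.h as DISPATCH_LOG_BYTES divided
 * by sizeof(struct dtl_entry); with the usual 4kB log and 48-byte entries
 * that works out to 85 entries, so the whole buffer fits within a single
 * 4k page and the boundary requirement above is met.
 */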
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

/*
 * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
 * reading from the dispatch trace log.  If other code wants to consume
 * DTL entries, it can set this pointer to a function that will get
 * called once for each DTL entry that gets processed.
 */
static void (*dtl_consumer)(struct dtl_entry *entry, u64 index);

struct dtl_ring {
	u64	write_index;
	struct dtl_entry *write_ptr;
	struct dtl_entry *buf;
	struct dtl_entry *buf_end;
};
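/*
 * Per-cpu staging ring filled by consume_dtle() for the debugfs reader:
 * write_index counts every entry ever delivered and never wraps, while
 * write_ptr walks the [buf, buf_end) window and wraps back to buf.  A NULL
 * write_ptr means no reader is active and incoming entries are dropped.
 */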
static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();
	++dtlr->write_index;
}
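/*
 * The smp_wmb() above orders the entry copy before the write_index
 * increment, so a reader that observes the new index through
 * dtl_current_index() will also find the freshly copied entry in the
 * buffer.
 */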
static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}
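/*
 * Note that in this configuration the hypervisor-facing buffer is the one
 * registered at boot by the accounting/VPA setup code; dtl_start() does not
 * call register_dtl() itself, it merely hooks consume_dtle() in via
 * dtl_consumer so that entries are mirrored into the per-reader ring above.
 */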
static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}
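/*
 * register_dtl() is the plpar wrapper that registers the buffer with the
 * hypervisor for the given hardware thread; it is implemented on top of
 * the H_REGISTER_VPA hcall and takes the buffer by real address, hence the
 * __pa() above.
 */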
static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	/* ensure there are no other conflicting dtl users */
	if (!down_read_trylock(&dtl_access_lock))
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
				__func__, dtl->cpu);
		up_read(&dtl_access_lock);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc) {
		up_read(&dtl_access_lock);
		kmem_cache_free(dtl_cache, buf);
	}

	return rc;
}
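/*
 * dtl_access_lock (declared in asm/dtl.h) is shared with other in-kernel
 * consumers of the dispatch trace log, such as the vcpudispatch_stats
 * code, so holding it for read here keeps those users from re-purposing
 * the log while a debugfs reader has a buffer registered.
 */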
static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
	up_read(&dtl_access_lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* .. and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}
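/*
 * Example usage (a sketch, not part of this file): a userspace consumer is
 * expected to open the per-cpu debugfs file and read whole entries, whose
 * fields are big-endian, e.g.
 *
 *	int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *	struct dtl_entry ent;
 *
 *	while (read(fd, &ent, sizeof(ent)) == sizeof(ent))
 *		handle(&ent);
 *
 * The path assumes debugfs is mounted at /sys/kernel/debug and that
 * arch_debugfs_dir is the usual "powerpc" directory; handle() is a
 * hypothetical callback.
 */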
static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
};

static struct dentry *dtl_dir;

static void dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
}

static int dtl_init(void)
{
	int i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	dtl_dir = debugfs_create_dir("dtl", arch_debugfs_dir);

	debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask);
	debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries);

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		dtl_setup_file(dtl);
	}

	return 0;
}
machine_arch_initcall(pseries, dtl_init);
#endif /* CONFIG_DTL */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 */
static notrace u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
#ifdef CONFIG_DTL
		if (dtl_consumer)
			dtl_consumer(dtl, i);
#endif
		stolen += tb_delta;
		++i;
		++dtl;
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}
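/*
 * Notes on scan_dispatch_log(): stolen time is accumulated in timebase
 * ticks (enqueue_to_dispatch_time plus ready_to_enqueue_time for each
 * dispatch), and the "i + N_DISPATCH_LOG < vpa->dtl_idx" test detects that
 * the hypervisor has lapped the reader, in which case the scan skips ahead
 * to the oldest entry that is still valid instead of reading overwritten
 * slots.
 */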
/*
 * Accumulate stolen time by scanning the dispatch trace log.
 * Called on entry from user mode.
 */
void notrace pseries_accumulate_stolen_time(void)
{
	u64 sst, ust;
	struct cpu_accounting_data *acct = &local_paca->accounting;

	sst = scan_dispatch_log(acct->starttime_user);
	ust = scan_dispatch_log(acct->starttime);
	acct->stime -= sst;
	acct->utime -= ust;
	acct->steal_time += ust + sst;
}
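/*
 * The two scans above split the stolen time at the last user/kernel
 * transition: entries up to acct->starttime_user fell in the preceding
 * kernel interval and are deducted from stime, entries up to
 * acct->starttime fell in the user interval that just ended and are
 * deducted from utime, and both amounts are moved into steal_time.
 */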
u64 pseries_calculate_stolen_time(u64 stop_tb)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return 0;

	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
		return scan_dispatch_log(stop_tb);

	return 0;
}

#endif