// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle);

static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;
	u32 ffcr;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	ffcr = TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN |
	       TMC_FFCR_FON_TRIG_EVT | TMC_FFCR_TRIGON_TRIGIN;
	if (drvdata->stop_on_flush)
		ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etb_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}
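
/*
 * Drain the TMC's internal SRAM through the RAM Read Data (RRD) register.
 * Each read of RRD pops one 32-bit word of trace data; the TMC returns
 * 0xFFFFFFFF once no complete word is left to read, which is what
 * terminates the loop below.  If the buffer wrapped (TMC_STS_FULL), older
 * trace was overwritten, so a barrier packet is inserted at the start of
 * the buffer to let decoders resynchronize.
 */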
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}

static int __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC is not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etf_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}

/*
 * Return the amount of trace data available in the buffer from @pos,
 * limited to at most @len bytes, updating @bufpp to point at the data.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust the len to available size @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;
	if (actual > 0)
		*bufpp = drvdata->buf + pos;
	return actual;
}
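
/*
 * A minimal sketch of how a sysFS read loop is expected to consume the
 * helper above (variable names are illustrative, not part of this file):
 * the caller passes its file offset and requested length, copies out at
 * most the returned number of bytes, and stops once nothing comes back:
 *
 *	char *bufp;
 *	ssize_t actual = tmc_etb_get_sysfs_trace(drvdata, pos, count, &bufp);
 *	if (actual <= 0)
 *		return 0;
 *	if (copy_to_user(ubuf, bufp, actual))
 *		return -EFAULT;
 *	pos += actual;
 */
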
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
		csdev->refcnt++;
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}
	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		coresight_set_mode(csdev, CS_MODE_SYSFS);
		csdev->refcnt++;
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			csdev->refcnt++;
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			coresight_set_mode(csdev, CS_MODE_PERF);
			csdev->refcnt++;
		}
	} while (0);
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
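
/*
 * The two enable paths above are driven from user space.  As an
 * illustration (the sink name is platform-dependent):
 *
 *   sysFS:  echo 1 > /sys/bus/coresight/devices/tmc_etf0/enable_sink
 *   perf:   perf record -e cs_etm/@tmc_etf0/u --per-thread -- <cmd>
 *
 * The perf path reaches here with CS_MODE_PERF and a perf_output_handle,
 * while the sysFS path passes CS_MODE_SYSFS.
 */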
static int tmc_enable_etf_sink(struct coresight_device *csdev,
			       enum cs_mode mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev, data);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	csdev->refcnt--;
	if (csdev->refcnt) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	coresight_set_mode(csdev, CS_MODE_DISABLED);

	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
			       struct coresight_connection *in,
			       struct coresight_connection *out)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (csdev->refcnt == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			coresight_set_mode(csdev, CS_MODE_SYSFS);
			first_enable = true;
		}
	}
	if (!ret)
		csdev->refcnt++;
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 struct coresight_connection *in,
				 struct coresight_connection *out)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	csdev->refcnt--;
	if (csdev->refcnt == 0) {
		tmc_etf_disable_hw(drvdata);
		coresight_set_mode(csdev, CS_MODE_DISABLED);
		last_disable = true;
	}
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}
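
/*
 * Worked example for the head arithmetic in tmc_set_etf_buffer() below
 * (values are illustrative): perf guarantees the AUX buffer is a
 * power-of-two number of pages, so the ring size is nr_pages << PAGE_SHIFT
 * and "& (size - 1)" is a cheap modulo.  With nr_pages = 4 and a 4K
 * PAGE_SIZE the ring is 16384 bytes; a handle->head of 20480 wraps to
 * 20480 & 16383 = 4096, giving buf->cur = 1 and buf->offset = 0.
 */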
static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(coresight_get_mode(csdev) != CS_MODE_PERF))
		return 0;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (csdev->refcnt != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.  In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	/* Read the trace data from the FIFO, one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of
	 * bytes that were written.  User space will figure out how many
	 * bytes to get from the AUX buffer based on the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	/*
	 * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
	 * data before the aux_head is updated via perf_aux_output_end(), which
	 * is expected by the perf ring buffer.
	 */
	CS_LOCK(drvdata->base);
out:
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}
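
/*
 * Panic-time snapshot support, invoked on kernel panic through the
 * panic_ops hook wired up below.  tmc_panic_sync_etf() copies whatever
 * trace is sitting in the TMC's internal SRAM into a reserved memory
 * region and fills in a metadata block (register snapshot, buffer
 * geometry, CRCs) that post-mortem tooling can use to validate and decode
 * the trace after a warm reboot.
 */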
static int tmc_panic_sync_etf(struct coresight_device *csdev)
{
	u32 val;
	struct tmc_crash_metadata *mdata;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;

	/* Make sure we have valid reserved memory */
	if (!tmc_has_reserved_buffer(drvdata) ||
	    !tmc_has_crash_mdata_buffer(drvdata))
		return 0;

	tmc_crashdata_set_invalid(drvdata);

	CS_UNLOCK(drvdata->base);

	/* Proceed only if ETF is enabled or configured as sink */
	val = readl(drvdata->base + TMC_CTL);
	if (!(val & TMC_CTL_CAPT_EN))
		goto out;
	val = readl(drvdata->base + TMC_MODE);
	if (val != TMC_MODE_CIRCULAR_BUFFER)
		goto out;

	val = readl(drvdata->base + TMC_FFSR);
	/* Do a manual flush and stop only if it hasn't auto-stopped */
	if (!(val & TMC_FFSR_FT_STOPPED)) {
		dev_dbg(&csdev->dev,
			"%s: Triggering manual flush\n", __func__);
		tmc_flush_and_stop(drvdata);
	} else
		tmc_wait_for_tmcready(drvdata);

	/* Sync registers from hardware to metadata region */
	mdata->tmc_sts = readl(drvdata->base + TMC_STS);
	mdata->tmc_mode = readl(drvdata->base + TMC_MODE);
	mdata->tmc_ffcr = readl(drvdata->base + TMC_FFCR);
	mdata->tmc_ffsr = readl(drvdata->base + TMC_FFSR);

	/* Sync internal SRAM to the reserved trace buffer region */
	drvdata->buf = drvdata->resrv_buf.vaddr;
	tmc_etb_dump_hw(drvdata);
	/* Store as per the RSZ register convention, i.e. in 32-bit words */
	mdata->tmc_ram_size = drvdata->len >> 2;

	/* Other fields for processing trace buffer reads */
	mdata->tmc_rrp = 0;
	mdata->tmc_dba = 0;
	mdata->tmc_rwp = drvdata->len;
	mdata->trace_paddr = drvdata->resrv_buf.paddr;

	mdata->version = CS_CRASHDATA_VERSION;

	/*
	 * Make sure all previous writes are ordered
	 * before we mark the metadata valid.
	 */
	dmb(sy);
	mdata->valid = true;
	/*
	 * This ordering must be maintained: the metadata CRC covers the
	 * trace-data CRC, so the latter has to be computed first.
	 */
	mdata->crc32_tdata = find_crash_tracedata_crc(drvdata, mdata);
	mdata->crc32_mdata = find_crash_metadata_crc(mdata);

	tmc_disable_hw(drvdata);

	dev_dbg(&csdev->dev, "%s: success\n", __func__);
out:
	CS_LOCK(drvdata->base);
	return 0;
}
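
/*
 * An ETB is a dedicated sink, so tmc_etb_cs_ops below exposes only sink
 * operations.  An ETF can additionally sit in the middle of a trace path
 * in HW FIFO mode, so tmc_etf_cs_ops exposes link operations as well,
 * plus the panic sync hook used to preserve trace across a crash.
 */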
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

static const struct coresight_ops_panic tmc_etf_sync_ops = {
	.sync		= tmc_panic_sync_etf,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
	.panic_ops	= &tmc_etf_sync_ops,
};

int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
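
/*
 * Counterpart to tmc_read_prepare_etb().  A sysFS read of the trace data
 * follows a prepare/read/unprepare sequence: prepare stops the capture and
 * flags drvdata->reading, the data is then copied out through
 * tmc_etb_get_sysfs_trace(), and unprepare below either restarts the
 * capture (when the sink is still enabled from sysFS) or frees the buffer.
 */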
785 */ 786 buf = drvdata->buf; 787 drvdata->buf = NULL; 788 } 789 790 drvdata->reading = false; 791 raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); 792 793 /* 794 * Free allocated memory outside of the spinlock. There is no need 795 * to assert the validity of 'buf' since calling kfree(NULL) is safe. 796 */ 797 kfree(buf); 798 799 return 0; 800 } 801