Lines matching full:perf in the PCIe NTB Perf Linux driver
45 * PCIe NTB Perf Linux driver
126 * Perf driver data definition
144 struct perf_ctx *perf; member
169 struct perf_ctx *perf; member
205 int (*cmd_recv)(struct perf_ctx *perf, int *pidx, enum perf_cmd *cmd,
247 static void perf_terminate_test(struct perf_ctx *perf);
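
The entries under "Perf driver data definition" above reference a perf_ctx context and a cmd_recv function pointer. The sketch below gathers the fields that are visible in the listed lines into one reduced structure for orientation; the field set, types and comments are assumptions inferred from the listed usages, not the driver's full definition (which, per the cmd_recv member above, types commands as enum perf_cmd rather than int).

#include <linux/ntb.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/debugfs.h>

struct perf_peer;	/* per-peer state: pidx, gidx, in/out buffers, service work */

/* Reduced sketch of the driver context implied by the listed lines. */
struct perf_ctx {
	struct ntb_dev *ntb;
	int gidx;			/* this port's global index */
	int pcnt;			/* number of peer ports */
	struct perf_peer *peers;	/* one entry per peer index (pidx) */

	/* Command transport, bound by perf_init_service() to either the
	 * scratchpad+doorbell or the message-register implementation.
	 */
	int (*cmd_send)(struct perf_peer *peer, int cmd, u64 data);
	int (*cmd_recv)(struct perf_ctx *perf, int *pidx, int *cmd, u64 *data);

	/* Test state visible further down the listing. */
	unsigned long busy_flag;	/* bit 0: a test or stats read in flight */
	atomic_t tsync;			/* running-thread countdown */
	wait_queue_head_t twait;	/* submitter waits here for tsync <= 0 */
	u8 tcnt;			/* threads per test run */
	struct perf_peer *test_peer;	/* peer chosen through debugfs "run" */
	struct dentry *dbgfs_dir;
};
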
253 link = ntb_link_is_up(peer->perf->ntb, NULL, NULL); in perf_link_is_up()
260 struct perf_ctx *perf = peer->perf; in perf_spad_cmd_send() local
264 dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data); in perf_spad_cmd_send()
277 sts = ntb_peer_spad_read(perf->ntb, peer->pidx, in perf_spad_cmd_send()
278 PERF_SPAD_CMD(perf->gidx)); in perf_spad_cmd_send()
284 ntb_peer_spad_write(perf->ntb, peer->pidx, in perf_spad_cmd_send()
285 PERF_SPAD_LDATA(perf->gidx), in perf_spad_cmd_send()
287 ntb_peer_spad_write(perf->ntb, peer->pidx, in perf_spad_cmd_send()
288 PERF_SPAD_HDATA(perf->gidx), in perf_spad_cmd_send()
290 ntb_peer_spad_write(perf->ntb, peer->pidx, in perf_spad_cmd_send()
291 PERF_SPAD_CMD(perf->gidx), in perf_spad_cmd_send()
293 ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx)); in perf_spad_cmd_send()
295 dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n", in perf_spad_cmd_send()
304 static int perf_spad_cmd_recv(struct perf_ctx *perf, int *pidx, in perf_spad_cmd_recv() argument
310 ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx)); in perf_spad_cmd_recv()
318 for (*pidx = 0; *pidx < perf->pcnt; (*pidx)++) { in perf_spad_cmd_recv()
319 peer = &perf->peers[*pidx]; in perf_spad_cmd_recv()
324 val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx)); in perf_spad_cmd_recv()
330 val = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx)); in perf_spad_cmd_recv()
333 val = ntb_spad_read(perf->ntb, PERF_SPAD_HDATA(peer->gidx)); in perf_spad_cmd_recv()
337 ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), in perf_spad_cmd_recv()
340 dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data); in perf_spad_cmd_recv()
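
The perf_spad_cmd_send()/perf_spad_cmd_recv() lines above describe a small command mailbox built on scratchpad registers plus one doorbell bit per port. Below is a minimal sketch of the send side assembled from the listed calls; the busy check, the return codes and the idea that the caller retries on -EBUSY are assumptions of the sketch rather than confirmed behaviour.

#include <linux/ntb.h>
#include <linux/kernel.h>

/* Sketch: publish a 64-bit payload and a command word into the peer's
 * scratchpads, then ring its notification doorbell. PERF_SPAD_CMD/LDATA/
 * HDATA/NOTIFY and PERF_CMD_INVAL are the macros named in the listing.
 */
static int spad_cmd_send_sketch(struct ntb_dev *ntb, int pidx, int gidx,
				int peer_gidx, u32 cmd, u64 data)
{
	u32 sts;

	/* The slot is usable only once the peer has consumed (invalidated)
	 * the previous command.
	 */
	sts = ntb_peer_spad_read(ntb, pidx, PERF_SPAD_CMD(gidx));
	if (sts != PERF_CMD_INVAL)
		return -EBUSY;	/* assumption: caller retries until a timeout */

	ntb_peer_spad_write(ntb, pidx, PERF_SPAD_LDATA(gidx), lower_32_bits(data));
	ntb_peer_spad_write(ntb, pidx, PERF_SPAD_HDATA(gidx), upper_32_bits(data));
	ntb_peer_spad_write(ntb, pidx, PERF_SPAD_CMD(gidx), cmd);
	ntb_peer_db_set(ntb, PERF_SPAD_NOTIFY(peer_gidx));

	return 0;
}

The receive side mirrors this: it clears its PERF_SPAD_NOTIFY() doorbell bit, scans every peer's PERF_SPAD_CMD() slot for a value other than PERF_CMD_INVAL, reads LDATA/HDATA, and writes PERF_CMD_INVAL back to release the slot.
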
351 struct perf_ctx *perf = peer->perf; in perf_msg_cmd_send() local
355 dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data); in perf_msg_cmd_send()
364 outbits = ntb_msg_outbits(perf->ntb); in perf_msg_cmd_send()
369 ret = ntb_msg_clear_sts(perf->ntb, outbits); in perf_msg_cmd_send()
373 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA, in perf_msg_cmd_send()
376 if (ntb_msg_read_sts(perf->ntb) & outbits) { in perf_msg_cmd_send()
381 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA, in perf_msg_cmd_send()
385 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd); in perf_msg_cmd_send()
393 static int perf_msg_cmd_recv(struct perf_ctx *perf, int *pidx, in perf_msg_cmd_recv() argument
399 inbits = ntb_msg_inbits(perf->ntb); in perf_msg_cmd_recv()
401 if (hweight64(ntb_msg_read_sts(perf->ntb) & inbits) < 3) in perf_msg_cmd_recv()
404 val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_CMD); in perf_msg_cmd_recv()
407 val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_LDATA); in perf_msg_cmd_recv()
410 val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_HDATA); in perf_msg_cmd_recv()
414 ntb_msg_clear_sts(perf->ntb, inbits); in perf_msg_cmd_recv()
416 dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data); in perf_msg_cmd_recv()
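
The message-register variant above carries the same command/payload triple through three NTB message registers instead of scratchpads. A sketch of the receive path, reconstructed from the listed reads, follows; the error codes and the exact meaning of the three status bits are assumptions.

#include <linux/ntb.h>
#include <linux/bitops.h>

/* Sketch: a command is considered complete only when the CMD, LDATA and
 * HDATA message slots (PERF_MSG_* in the listing) have all been written,
 * i.e. three inbound status bits are set.
 */
static int msg_cmd_recv_sketch(struct ntb_dev *ntb, int *pidx,
			       u32 *cmd, u64 *data)
{
	u64 inbits = ntb_msg_inbits(ntb);
	u32 val;

	if (hweight64(ntb_msg_read_sts(ntb) & inbits) < 3)
		return -ENODATA;	/* assumption: caller treats this as "try later" */

	val = ntb_msg_read(ntb, pidx, PERF_MSG_CMD);
	*cmd = val;

	val = ntb_msg_read(ntb, pidx, PERF_MSG_LDATA);
	*data = val;

	val = ntb_msg_read(ntb, pidx, PERF_MSG_HDATA);
	*data |= (u64)val << 32;

	/* Acknowledge so the peer's next ntb_peer_msg_write() can proceed. */
	return ntb_msg_clear_sts(ntb, inbits);
}

On the send side the listed code clears the outbound status, writes LDATA, checks ntb_msg_read_sts() against the outbound bits to detect an overrun, then writes HDATA and finally the CMD register.
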
423 struct perf_ctx *perf = peer->perf; in perf_cmd_send() local
426 return perf->cmd_send(peer, cmd, data); in perf_cmd_send()
428 dev_err(&perf->ntb->dev, "Send invalid command\n"); in perf_cmd_send()
442 dev_err(&peer->perf->ntb->dev, "Exec invalid command\n"); in perf_cmd_exec()
449 dev_dbg(&peer->perf->ntb->dev, "CMD exec: %d\n", cmd); in perf_cmd_exec()
456 static int perf_cmd_recv(struct perf_ctx *perf) in perf_cmd_recv() argument
462 while (!(ret = perf->cmd_recv(perf, &pidx, &cmd, &data))) { in perf_cmd_recv()
463 peer = &perf->peers[pidx]; in perf_cmd_recv()
473 dev_err(&perf->ntb->dev, "Recv invalid command\n"); in perf_cmd_recv()
484 struct perf_ctx *perf = ctx; in perf_link_event() local
489 for (pidx = 0; pidx < perf->pcnt; pidx++) { in perf_link_event()
490 peer = &perf->peers[pidx]; in perf_link_event()
506 struct perf_ctx *perf = ctx; in perf_db_event() local
508 dev_dbg(&perf->ntb->dev, "DB vec %d mask %#llx bits %#llx\n", vec, in perf_db_event()
509 ntb_db_vector_mask(perf->ntb, vec), ntb_db_read(perf->ntb)); in perf_db_event()
512 (void)perf_cmd_recv(perf); in perf_db_event()
517 struct perf_ctx *perf = ctx; in perf_msg_event() local
519 dev_dbg(&perf->ntb->dev, "Msg status bits %#llx\n", in perf_msg_event()
520 ntb_msg_read_sts(perf->ntb)); in perf_msg_event()
523 (void)perf_cmd_recv(perf); in perf_msg_event()
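
perf_link_event(), perf_db_event() and perf_msg_event() above are the three context callbacks the NTB core invokes. The sketch below shows the standard ntb_ctx_ops wiring they imply; the structure name matches the &perf_ops argument passed to ntb_set_ctx() further down, but treat the block as an illustration rather than the driver's exact definition.

#include <linux/ntb.h>

static void perf_link_event(void *ctx);
static void perf_db_event(void *ctx, int vec);
static void perf_msg_event(void *ctx);

/* Sketch: context callbacks handed to the NTB core via ntb_set_ctx(). */
static const struct ntb_ctx_ops perf_ops = {
	.link_event = perf_link_event,	/* link up/down: schedule peer service work */
	.db_event = perf_db_event,	/* doorbell: a scratchpad command arrived */
	.msg_event = perf_msg_event,	/* message IRQ: a message command arrived */
};
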
534 (void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx); in perf_free_outbuf()
539 struct perf_ctx *perf = peer->perf; in perf_setup_outbuf() local
543 ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx, in perf_setup_outbuf()
546 dev_err(&perf->ntb->dev, "Failed to set outbuf translation\n"); in perf_setup_outbuf()
562 (void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx); in perf_free_inbuf()
563 dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size, in perf_free_inbuf()
571 struct perf_ctx *perf = peer->perf; in perf_setup_inbuf() local
575 ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx, in perf_setup_inbuf()
578 dev_err(&perf->ntb->dev, "Couldn't get inbuf restrictions\n"); in perf_setup_inbuf()
583 dev_err(&perf->ntb->dev, "Too big inbuf size %pa > %pa\n", in perf_setup_inbuf()
592 peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev, in perf_setup_inbuf()
596 dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n", in perf_setup_inbuf()
601 dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n"); in perf_setup_inbuf()
605 ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx, in perf_setup_inbuf()
608 dev_err(&perf->ntb->dev, "Failed to set inbuf translation\n"); in perf_setup_inbuf()
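
perf_setup_inbuf() above allocates the inbound buffer and programs the memory-window translation. The following sketch compresses the listed steps into one helper; the error unwinding, the rounding policy and the parameter split are assumptions.

#include <linux/ntb.h>
#include <linux/dma-mapping.h>

/* Sketch: allocate a DMA-coherent inbound buffer that satisfies the
 * window's alignment/size limits, then expose it to the peer.
 */
static int setup_inbuf_sketch(struct ntb_dev *ntb, int pidx, int widx,
			      resource_size_t size, void **vaddr,
			      dma_addr_t *xlat)
{
	resource_size_t xlat_align, size_align, size_max;
	int ret;

	ret = ntb_mw_get_align(ntb, pidx, widx, &xlat_align, &size_align,
			       &size_max);
	if (ret)
		return ret;
	if (size > size_max)
		return -EINVAL;		/* "Too big inbuf size" in the listing */

	size = round_up(size, size_align);
	*vaddr = dma_alloc_coherent(&ntb->pdev->dev, size, xlat, GFP_KERNEL);
	if (!*vaddr)
		return -ENOMEM;

	/* The hardware cannot translate a window to an unaligned address. */
	if (!IS_ALIGNED(*xlat, xlat_align)) {
		dma_free_coherent(&ntb->pdev->dev, size, *vaddr, *xlat);
		return -EINVAL;
	}

	return ntb_mw_set_trans(ntb, pidx, widx, *xlat, size);
}

The peer then learns the translated address through the command channel and calls ntb_peer_mw_set_trans(), which is what perf_setup_outbuf() above does.
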
646 if (test_bit(0, &peer->perf->busy_flag) && in perf_service_work()
647 peer == peer->perf->test_peer) { in perf_service_work()
648 dev_warn(&peer->perf->ntb->dev, in perf_service_work()
650 perf_terminate_test(peer->perf); in perf_service_work()
657 static int perf_init_service(struct perf_ctx *perf) in perf_init_service() argument
661 if (ntb_peer_mw_count(perf->ntb) < perf->pcnt) { in perf_init_service()
662 dev_err(&perf->ntb->dev, "Not enough memory windows\n"); in perf_init_service()
666 if (ntb_msg_count(perf->ntb) >= PERF_MSG_CNT) { in perf_init_service()
667 perf->cmd_send = perf_msg_cmd_send; in perf_init_service()
668 perf->cmd_recv = perf_msg_cmd_recv; in perf_init_service()
670 dev_dbg(&perf->ntb->dev, "Message service initialized\n"); in perf_init_service()
675 dev_dbg(&perf->ntb->dev, "Message service unsupported\n"); in perf_init_service()
677 mask = GENMASK_ULL(perf->pcnt, 0); in perf_init_service()
678 if (ntb_spad_count(perf->ntb) >= PERF_SPAD_CNT(perf->pcnt) && in perf_init_service()
679 (ntb_db_valid_mask(perf->ntb) & mask) == mask) { in perf_init_service()
680 perf->cmd_send = perf_spad_cmd_send; in perf_init_service()
681 perf->cmd_recv = perf_spad_cmd_recv; in perf_init_service()
683 dev_dbg(&perf->ntb->dev, "Scratchpad service initialized\n"); in perf_init_service()
688 dev_dbg(&perf->ntb->dev, "Scratchpad service unsupported\n"); in perf_init_service()
690 dev_err(&perf->ntb->dev, "Command services unsupported\n"); in perf_init_service()
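
perf_init_service() above picks the command transport. The sketch below restates that policy; the PERF_SERVICE_* return values are hypothetical names introduced here for illustration, while PERF_MSG_CNT and PERF_SPAD_CNT() are the macros named in the listing.

#include <linux/ntb.h>
#include <linux/bits.h>

enum perf_service_sketch { PERF_SERVICE_MSG, PERF_SERVICE_SPAD }; /* hypothetical */

/* Sketch: prefer message registers, fall back to scratchpads + doorbell. */
static int choose_cmd_service_sketch(struct ntb_dev *ntb, int pcnt)
{
	u64 mask = GENMASK_ULL(pcnt, 0);

	if (ntb_peer_mw_count(ntb) < pcnt)
		return -EINVAL;		/* need one memory window per peer */

	if (ntb_msg_count(ntb) >= PERF_MSG_CNT)
		return PERF_SERVICE_MSG;

	if (ntb_spad_count(ntb) >= PERF_SPAD_CNT(pcnt) &&
	    (ntb_db_valid_mask(ntb) & mask) == mask)
		return PERF_SERVICE_SPAD;

	return -EINVAL;			/* "Command services unsupported" */
}
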
695 static int perf_enable_service(struct perf_ctx *perf) in perf_enable_service() argument
700 mask = ntb_db_valid_mask(perf->ntb); in perf_enable_service()
701 (void)ntb_db_set_mask(perf->ntb, mask); in perf_enable_service()
703 ret = ntb_set_ctx(perf->ntb, perf, &perf_ops); in perf_enable_service()
707 if (perf->cmd_send == perf_msg_cmd_send) { in perf_enable_service()
710 inbits = ntb_msg_inbits(perf->ntb); in perf_enable_service()
711 outbits = ntb_msg_outbits(perf->ntb); in perf_enable_service()
712 (void)ntb_msg_set_mask(perf->ntb, inbits | outbits); in perf_enable_service()
715 ret = ntb_msg_clear_mask(perf->ntb, incmd_bit); in perf_enable_service()
717 dev_dbg(&perf->ntb->dev, "MSG sts unmasked %#llx\n", incmd_bit); in perf_enable_service()
719 scnt = ntb_spad_count(perf->ntb); in perf_enable_service()
721 ntb_spad_write(perf->ntb, sidx, PERF_CMD_INVAL); in perf_enable_service()
722 incmd_bit = PERF_SPAD_NOTIFY(perf->gidx); in perf_enable_service()
723 ret = ntb_db_clear_mask(perf->ntb, incmd_bit); in perf_enable_service()
725 dev_dbg(&perf->ntb->dev, "DB bits unmasked %#llx\n", incmd_bit); in perf_enable_service()
728 ntb_clear_ctx(perf->ntb); in perf_enable_service()
732 ntb_link_enable(perf->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); in perf_enable_service()
734 ntb_link_event(perf->ntb); in perf_enable_service()
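
perf_enable_service() above brings the command service and the link up in a specific order. The sketch below keeps only that ordering; it folds the message/scratchpad branches into a single cmd_bit parameter and drops the scratchpad pre-initialisation, so treat it as an outline rather than the listed function.

#include <linux/ntb.h>

/* Sketch: mask everything, register the context, unmask only the command
 * notification, then enable the link and replay its current state.
 */
static int enable_service_sketch(struct ntb_dev *ntb, void *ctx,
				 const struct ntb_ctx_ops *ops, u64 cmd_bit)
{
	int ret;

	(void)ntb_db_set_mask(ntb, ntb_db_valid_mask(ntb));

	ret = ntb_set_ctx(ntb, ctx, ops);
	if (ret)
		return ret;

	ret = ntb_db_clear_mask(ntb, cmd_bit);
	if (ret) {
		ntb_clear_ctx(ntb);
		return ret;
	}

	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);	/* pick up a link that is already up */

	return 0;
}

perf_disable_service() below undoes this in reverse: re-mask the command source, clear the context, flush the per-peer service work, and disable the link.
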
739 static void perf_disable_service(struct perf_ctx *perf) in perf_disable_service() argument
743 if (perf->cmd_send == perf_msg_cmd_send) { in perf_disable_service()
746 inbits = ntb_msg_inbits(perf->ntb); in perf_disable_service()
747 (void)ntb_msg_set_mask(perf->ntb, inbits); in perf_disable_service()
749 (void)ntb_db_set_mask(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx)); in perf_disable_service()
752 ntb_clear_ctx(perf->ntb); in perf_disable_service()
754 for (pidx = 0; pidx < perf->pcnt; pidx++) in perf_disable_service()
755 perf_cmd_exec(&perf->peers[pidx], PERF_CMD_CLEAR); in perf_disable_service()
757 for (pidx = 0; pidx < perf->pcnt; pidx++) in perf_disable_service()
758 flush_work(&perf->peers[pidx].service); in perf_disable_service()
760 for (pidx = 0; pidx < perf->pcnt; pidx++) { in perf_disable_service()
761 struct perf_peer *peer = &perf->peers[pidx]; in perf_disable_service()
763 ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0); in perf_disable_service()
766 ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx)); in perf_disable_service()
768 ntb_link_disable(perf->ntb); in perf_disable_service()
791 struct perf_peer *peer = pthr->perf->test_peer; in perf_copy_chunk()
852 return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR; in perf_copy_chunk()
862 struct perf_ctx *perf = data; in perf_dma_filter() local
865 node = dev_to_node(&perf->ntb->dev); in perf_dma_filter()
872 struct perf_ctx *perf = pthr->perf; in perf_init_test() local
874 struct perf_peer *peer = pthr->perf->test_peer; in perf_init_test()
876 pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL, in perf_init_test()
877 dev_to_node(&perf->ntb->dev)); in perf_init_test()
881 get_random_bytes(pthr->src, perf->test_peer->outbuf_size); in perf_init_test()
888 pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf); in perf_init_test()
890 dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n", in perf_init_test()
915 atomic_dec(&perf->tsync); in perf_init_test()
916 wake_up(&perf->twait); in perf_init_test()
923 struct perf_peer *peer = pthr->perf->test_peer; in perf_run_test()
924 struct perf_ctx *perf = pthr->perf; in perf_run_test() local
944 dev_err(&perf->ntb->dev, "%d: Got error %d on test\n", in perf_run_test()
967 struct perf_ctx *perf = pthr->perf; in perf_sync_test() local
974 atomic_read(&perf->tsync) < 0)); in perf_sync_test()
976 if (atomic_read(&perf->tsync) < 0) in perf_sync_test()
982 dev_dbg(&perf->ntb->dev, "%d: copied %llu bytes\n", in perf_sync_test()
985 dev_dbg(&perf->ntb->dev, "%d: lasted %llu usecs\n", in perf_sync_test()
988 dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx, in perf_sync_test()
996 struct perf_ctx *perf = pthr->perf; in perf_clear_test() local
1006 if (pthr->perf->test_peer->dma_dst_addr) in perf_clear_test()
1008 pthr->perf->test_peer->dma_dst_addr, in perf_clear_test()
1009 pthr->perf->test_peer->outbuf_size, in perf_clear_test()
1015 atomic_dec(&perf->tsync); in perf_clear_test()
1016 wake_up(&perf->twait); in perf_clear_test()
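
perf_dma_filter() and the dma_request_channel() call above select a DMA engine channel near the NTB device when use_dma is set. The sketch below shows one way such a NUMA-aware filter can look; passing the ntb_dev itself as the filter parameter is an assumption of this sketch (the listed code passes its perf context).

#include <linux/dmaengine.h>
#include <linux/numa.h>
#include <linux/ntb.h>

/* Sketch: accept only channels on the NTB device's NUMA node. */
static bool dma_filter_sketch(struct dma_chan *chan, void *data)
{
	struct ntb_dev *ntb = data;
	int node = dev_to_node(&ntb->dev);

	return node == NUMA_NO_NODE ||
	       node == dev_to_node(&chan->dev->device);
}

A channel would then be requested with dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); followed by dma_request_channel(mask, dma_filter_sketch, ntb), falling back to CPU copies when no channel is available.
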
1050 static int perf_set_tcnt(struct perf_ctx *perf, u8 tcnt) in perf_set_tcnt() argument
1055 if (test_and_set_bit_lock(0, &perf->busy_flag)) in perf_set_tcnt()
1058 perf->tcnt = tcnt; in perf_set_tcnt()
1060 clear_bit_unlock(0, &perf->busy_flag); in perf_set_tcnt()
1065 static void perf_terminate_test(struct perf_ctx *perf) in perf_terminate_test() argument
1069 atomic_set(&perf->tsync, -1); in perf_terminate_test()
1070 wake_up(&perf->twait); in perf_terminate_test()
1073 wake_up(&perf->threads[tidx].dma_wait); in perf_terminate_test()
1074 cancel_work_sync(&perf->threads[tidx].work); in perf_terminate_test()
1080 struct perf_ctx *perf = peer->perf; in perf_submit_test() local
1088 if (test_and_set_bit_lock(0, &perf->busy_flag)) in perf_submit_test()
1091 perf->test_peer = peer; in perf_submit_test()
1092 atomic_set(&perf->tsync, perf->tcnt); in perf_submit_test()
1095 pthr = &perf->threads[tidx]; in perf_submit_test()
1100 if (tidx < perf->tcnt) in perf_submit_test()
1104 ret = wait_event_interruptible(perf->twait, in perf_submit_test()
1105 atomic_read(&perf->tsync) <= 0); in perf_submit_test()
1107 perf_terminate_test(perf); in perf_submit_test()
1111 clear_bit_unlock(0, &perf->busy_flag); in perf_submit_test()
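
perf_submit_test() above serialises test runs with busy_flag, counts running threads in tsync, and sleeps on twait until they finish. The sketch below captures that flow with a hypothetical perf_test_ctx structure and work array standing in for the driver's perf_ctx/perf_thread fields; MAX_THREADS_SKETCH is an invented bound.

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/bitops.h>

#define MAX_THREADS_SKETCH 16		/* hypothetical upper bound */

struct perf_test_ctx {			/* hypothetical stand-in */
	unsigned long busy_flag;
	atomic_t tsync;			/* threads still running */
	wait_queue_head_t twait;	/* submitter sleeps here */
	u8 tcnt;			/* requested thread count */
	struct work_struct work[MAX_THREADS_SKETCH];
};

/* Sketch: start tcnt worker threads and wait until they all report done. */
static int submit_test_sketch(struct perf_test_ctx *t)
{
	int tidx, ret;

	if (test_and_set_bit_lock(0, &t->busy_flag))
		return -EBUSY;		/* one test at a time */

	atomic_set(&t->tsync, t->tcnt);
	for (tidx = 0; tidx < t->tcnt; tidx++)
		(void)queue_work(system_unbound_wq, &t->work[tidx]);

	/* Each worker decrements tsync and wakes twait when it is done. */
	ret = wait_event_interruptible(t->twait, atomic_read(&t->tsync) <= 0);
	/* On a signal the listed code terminates the threads here. */

	clear_bit_unlock(0, &t->busy_flag);
	return ret;
}
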
1116 static int perf_read_stats(struct perf_ctx *perf, char *buf, in perf_read_stats() argument
1122 if (test_and_set_bit_lock(0, &perf->busy_flag)) in perf_read_stats()
1126 " Peer %d test statistics:\n", perf->test_peer->pidx); in perf_read_stats()
1129 pthr = &perf->threads[tidx]; in perf_read_stats()
1146 clear_bit_unlock(0, &perf->busy_flag); in perf_read_stats()
1151 static void perf_init_threads(struct perf_ctx *perf) in perf_init_threads() argument
1156 perf->tcnt = DEF_THREADS_CNT; in perf_init_threads()
1157 perf->test_peer = &perf->peers[0]; in perf_init_threads()
1158 init_waitqueue_head(&perf->twait); in perf_init_threads()
1161 pthr = &perf->threads[tidx]; in perf_init_threads()
1163 pthr->perf = perf; in perf_init_threads()
1171 static void perf_clear_threads(struct perf_ctx *perf) in perf_clear_threads() argument
1173 perf_terminate_test(perf); in perf_clear_threads()
1184 struct perf_ctx *perf = filep->private_data; in perf_dbgfs_read_info() local
1201 "Local port %d, Global index %d\n", ntb_port_number(perf->ntb), in perf_dbgfs_read_info()
1202 perf->gidx); in perf_dbgfs_read_info()
1204 if (test_bit(0, &perf->busy_flag)) { in perf_dbgfs_read_info()
1207 ntb_peer_port_number(perf->ntb, perf->test_peer->pidx), in perf_dbgfs_read_info()
1208 perf->test_peer->pidx); in perf_dbgfs_read_info()
1213 for (pidx = 0; pidx < perf->pcnt; pidx++) { in perf_dbgfs_read_info()
1214 peer = &perf->peers[pidx]; in perf_dbgfs_read_info()
1218 ntb_peer_port_number(perf->ntb, peer->pidx), peer->pidx, in perf_dbgfs_read_info()
1267 struct perf_ctx *perf = filep->private_data; in perf_dbgfs_read_run() local
1275 ret = perf_read_stats(perf, buf, PERF_BUF_LEN, &pos); in perf_dbgfs_read_run()
1289 struct perf_ctx *perf = filep->private_data; in perf_dbgfs_write_run() local
1297 if (pidx < 0 || pidx >= perf->pcnt) in perf_dbgfs_write_run()
1300 peer = &perf->peers[pidx]; in perf_dbgfs_write_run()
1318 struct perf_ctx *perf = filep->private_data; in perf_dbgfs_read_tcnt() local
1322 pos = scnprintf(buf, sizeof(buf), "%hhu\n", perf->tcnt); in perf_dbgfs_read_tcnt()
1331 struct perf_ctx *perf = filep->private_data; in perf_dbgfs_write_tcnt() local
1339 ret = perf_set_tcnt(perf, val); in perf_dbgfs_write_tcnt()
1352 static void perf_setup_dbgfs(struct perf_ctx *perf) in perf_setup_dbgfs() argument
1354 struct pci_dev *pdev = perf->ntb->pdev; in perf_setup_dbgfs()
1356 perf->dbgfs_dir = debugfs_create_dir(pci_name(pdev), perf_dbgfs_topdir); in perf_setup_dbgfs()
1357 if (!perf->dbgfs_dir) { in perf_setup_dbgfs()
1358 dev_warn(&perf->ntb->dev, "DebugFS unsupported\n"); in perf_setup_dbgfs()
1362 debugfs_create_file("info", 0600, perf->dbgfs_dir, perf, in perf_setup_dbgfs()
1365 debugfs_create_file("run", 0600, perf->dbgfs_dir, perf, in perf_setup_dbgfs()
1368 debugfs_create_file("threads_count", 0600, perf->dbgfs_dir, perf, in perf_setup_dbgfs()
1372 debugfs_create_u8("chunk_order", 0500, perf->dbgfs_dir, &chunk_order); in perf_setup_dbgfs()
1374 debugfs_create_u8("total_order", 0500, perf->dbgfs_dir, &total_order); in perf_setup_dbgfs()
1376 debugfs_create_bool("use_dma", 0500, perf->dbgfs_dir, &use_dma); in perf_setup_dbgfs()
1379 static void perf_clear_dbgfs(struct perf_ctx *perf) in perf_clear_dbgfs() argument
1381 debugfs_remove_recursive(perf->dbgfs_dir); in perf_clear_dbgfs()
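
The debugfs lines above create an "info" dump, a "run" trigger/stats file, a "threads_count" knob, and read-only views of the chunk_order/total_order/use_dma module parameters. The sketch below shows the usual file_operations wiring behind such debugfs_create_file() calls; the use of simple_open and the exact read/write prototypes are assumptions based on common debugfs practice, not taken from the listing.

#include <linux/debugfs.h>
#include <linux/fs.h>

static ssize_t perf_dbgfs_read_run(struct file *filep, char __user *ubuf,
				   size_t size, loff_t *offp);
static ssize_t perf_dbgfs_write_run(struct file *filep, const char __user *ubuf,
				    size_t size, loff_t *offp);

/* Sketch: reading "run" returns the last test's statistics, writing a
 * peer index starts a new test against that peer.
 */
static const struct file_operations perf_dbgfs_run_sketch = {
	.open = simple_open,	/* exposes inode->i_private as file->private_data */
	.read = perf_dbgfs_read_run,
	.write = perf_dbgfs_write_run,
};

Hooked up as in the listing: debugfs_create_file("run", 0600, perf->dbgfs_dir, perf, &perf_dbgfs_run_sketch).
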
1391 struct perf_ctx *perf; in perf_create_data() local
1393 perf = devm_kzalloc(&ntb->dev, sizeof(*perf), GFP_KERNEL); in perf_create_data()
1394 if (!perf) in perf_create_data()
1397 perf->pcnt = ntb_peer_port_count(ntb); in perf_create_data()
1398 perf->peers = devm_kcalloc(&ntb->dev, perf->pcnt, sizeof(*perf->peers), in perf_create_data()
1400 if (!perf->peers) in perf_create_data()
1403 perf->ntb = ntb; in perf_create_data()
1405 return perf; in perf_create_data()
1410 struct perf_ctx *perf = peer->perf; in perf_setup_peer_mw() local
1415 ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr, in perf_setup_peer_mw()
1420 peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr, in perf_setup_peer_mw()
1429 dev_warn(&peer->perf->ntb->dev, in perf_setup_peer_mw()
1437 static int perf_init_peers(struct perf_ctx *perf) in perf_init_peers() argument
1442 lport = ntb_port_number(perf->ntb); in perf_init_peers()
1443 perf->gidx = -1; in perf_init_peers()
1444 for (pidx = 0; pidx < perf->pcnt; pidx++) { in perf_init_peers()
1445 peer = &perf->peers[pidx]; in perf_init_peers()
1447 peer->perf = perf; in perf_init_peers()
1449 if (lport < ntb_peer_port_number(perf->ntb, pidx)) { in perf_init_peers()
1450 if (perf->gidx == -1) in perf_init_peers()
1451 perf->gidx = pidx; in perf_init_peers()
1459 if (perf->gidx == -1) in perf_init_peers()
1460 perf->gidx = pidx; in perf_init_peers()
1466 if (perf->pcnt == 1 && ntb_port_number(perf->ntb) == 0 && in perf_init_peers()
1467 ntb_peer_port_number(perf->ntb, 0) == 0) { in perf_init_peers()
1468 perf->gidx = 0; in perf_init_peers()
1469 perf->peers[0].gidx = 0; in perf_init_peers()
1472 for (pidx = 0; pidx < perf->pcnt; pidx++) { in perf_init_peers()
1473 ret = perf_setup_peer_mw(&perf->peers[pidx]); in perf_init_peers()
1478 dev_dbg(&perf->ntb->dev, "Global port index %d\n", perf->gidx); in perf_init_peers()
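
perf_init_peers() above derives a global index (gidx) for the local port from how its port number compares with the peers' port numbers. The sketch below restates just that rule; the per-peer gidx bookkeeping and the crosslink special case (a single peer with both ports numbered 0) are simplified away, so take it as an approximation of the listed logic.

#include <linux/ntb.h>

/* Sketch: the local port's global index is its rank among all ports. */
static int pick_global_idx_sketch(struct ntb_dev *ntb)
{
	int lport = ntb_port_number(ntb);
	int pcnt = ntb_peer_port_count(ntb);
	int pidx, gidx = -1;

	for (pidx = 0; pidx < pcnt; pidx++) {
		/* The first peer with a higher port number marks our slot. */
		if (lport < ntb_peer_port_number(ntb, pidx) && gidx == -1)
			gidx = pidx;
	}

	/* Every peer has a lower port number: the local port takes the
	 * last slot (pidx == pcnt after the loop in the listed code).
	 */
	if (gidx == -1)
		gidx = pcnt;

	return gidx;
}
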
1485 struct perf_ctx *perf; in perf_probe() local
1488 perf = perf_create_data(ntb); in perf_probe()
1489 if (IS_ERR(perf)) in perf_probe()
1490 return PTR_ERR(perf); in perf_probe()
1492 ret = perf_init_peers(perf); in perf_probe()
1496 perf_init_threads(perf); in perf_probe()
1498 ret = perf_init_service(perf); in perf_probe()
1502 ret = perf_enable_service(perf); in perf_probe()
1506 perf_setup_dbgfs(perf); in perf_probe()
1513 struct perf_ctx *perf = ntb->ctx; in perf_remove() local
1515 perf_clear_dbgfs(perf); in perf_remove()
1517 perf_disable_service(perf); in perf_remove()
1519 perf_clear_threads(perf); in perf_remove()
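
perf_probe() and perf_remove() above are the NTB client entry points. The sketch below shows how such callbacks are conventionally registered with the NTB core; the variable name is invented, and registration/unregistration normally happen in the module init/exit paths (which would also create and remove the debugfs top directory that perf_setup_dbgfs() uses).

#include <linux/ntb.h>

static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb);
static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb);

/* Sketch: client ops handed to the NTB core for each matching device. */
static struct ntb_client perf_client_sketch = {
	.ops = {
		.probe = perf_probe,	/* allocate ctx, init peers/threads, enable service */
		.remove = perf_remove,	/* tear down debugfs, service and threads */
	},
};

/* Registration (typically from module init):
 *	ret = ntb_register_client(&perf_client_sketch);
 * with the matching ntb_unregister_client() call from module exit.
 */
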