Lines matching full:perf in drivers/ntb/test/ntb_perf.c (PCIe NTB Perf test driver); each entry shows the source line number, the matching code, and the enclosing function.

45  * PCIe NTB Perf Linux driver
126 * Perf driver data definition
144 struct perf_ctx *perf; member
169 struct perf_ctx *perf; member
205 int (*cmd_recv)(struct perf_ctx *perf, int *pidx, enum perf_cmd *cmd,
247 static void perf_terminate_test(struct perf_ctx *perf);
253 link = ntb_link_is_up(peer->perf->ntb, NULL, NULL); in perf_link_is_up()
260 struct perf_ctx *perf = peer->perf; in perf_spad_cmd_send() local
264 dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data); in perf_spad_cmd_send()
277 sts = ntb_peer_spad_read(perf->ntb, peer->pidx, in perf_spad_cmd_send()
278 PERF_SPAD_CMD(perf->gidx)); in perf_spad_cmd_send()
284 ntb_peer_spad_write(perf->ntb, peer->pidx, in perf_spad_cmd_send()
285 PERF_SPAD_LDATA(perf->gidx), in perf_spad_cmd_send()
287 ntb_peer_spad_write(perf->ntb, peer->pidx, in perf_spad_cmd_send()
288 PERF_SPAD_HDATA(perf->gidx), in perf_spad_cmd_send()
290 ntb_peer_spad_write(perf->ntb, peer->pidx, in perf_spad_cmd_send()
291 PERF_SPAD_CMD(perf->gidx), in perf_spad_cmd_send()
293 ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx)); in perf_spad_cmd_send()
295 dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n", in perf_spad_cmd_send()
304 static int perf_spad_cmd_recv(struct perf_ctx *perf, int *pidx, in perf_spad_cmd_recv() argument
310 ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx)); in perf_spad_cmd_recv()
318 for (*pidx = 0; *pidx < perf->pcnt; (*pidx)++) { in perf_spad_cmd_recv()
319 peer = &perf->peers[*pidx]; in perf_spad_cmd_recv()
324 val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx)); in perf_spad_cmd_recv()
330 val = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx)); in perf_spad_cmd_recv()
333 val = ntb_spad_read(perf->ntb, PERF_SPAD_HDATA(peer->gidx)); in perf_spad_cmd_recv()
337 ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), in perf_spad_cmd_recv()
340 dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data); in perf_spad_cmd_recv()
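
For context, the scratchpad command path listed above frames each command as three peer scratchpad writes (low data word, high data word, command code) followed by a doorbell ring; the receiver reads the slots back and acknowledges by rewriting the CMD scratchpad. Below is a minimal send-side sketch using the in-kernel NTB API; the demo_peer layout and the SPAD_* index macros are simplified assumptions, not the driver's exact definitions.

/* Send-side sketch of the scratchpad command framing (assumed slot layout). */
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/ntb.h>

/* Assumed three scratchpads per global index; the real PERF_SPAD_* macros
 * may differ. */
#define SPAD_CMD(gidx)		(3 * (gidx))
#define SPAD_LDATA(gidx)	(3 * (gidx) + 1)
#define SPAD_HDATA(gidx)	(3 * (gidx) + 2)
#define SPAD_NOTIFY(gidx)	BIT_ULL(gidx)

/* Hypothetical cut-down peer descriptor mirroring the fields used above. */
struct demo_peer {
	struct ntb_dev *ntb;
	int pidx;	/* peer port index on the local side */
	int gidx;	/* peer's global index: selects the doorbell bit to ring */
	int our_gidx;	/* local global index: selects the scratchpad slot we own */
};

static int demo_spad_cmd_send(struct demo_peer *peer, u32 cmd, u64 data)
{
	struct ntb_dev *ntb = peer->ntb;

	/* The driver first polls the peer's CMD scratchpad to make sure the
	 * previous command has been consumed; that retry loop is omitted. */

	/* Publish the payload first and the command code last, so a
	 * non-idle CMD scratchpad implies the whole frame is valid. */
	ntb_peer_spad_write(ntb, peer->pidx, SPAD_LDATA(peer->our_gidx),
			    lower_32_bits(data));
	ntb_peer_spad_write(ntb, peer->pidx, SPAD_HDATA(peer->our_gidx),
			    upper_32_bits(data));
	ntb_peer_spad_write(ntb, peer->pidx, SPAD_CMD(peer->our_gidx), cmd);

	/* Notify the peer via the doorbell bit reserved for commands. */
	return ntb_peer_db_set(ntb, SPAD_NOTIFY(peer->gidx));
}
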
351 struct perf_ctx *perf = peer->perf; in perf_msg_cmd_send() local
355 dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data); in perf_msg_cmd_send()
364 outbits = ntb_msg_outbits(perf->ntb); in perf_msg_cmd_send()
369 ret = ntb_msg_clear_sts(perf->ntb, outbits); in perf_msg_cmd_send()
373 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA, in perf_msg_cmd_send()
376 if (ntb_msg_read_sts(perf->ntb) & outbits) { in perf_msg_cmd_send()
381 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA, in perf_msg_cmd_send()
385 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd); in perf_msg_cmd_send()
393 static int perf_msg_cmd_recv(struct perf_ctx *perf, int *pidx, in perf_msg_cmd_recv() argument
399 inbits = ntb_msg_inbits(perf->ntb); in perf_msg_cmd_recv()
401 if (hweight64(ntb_msg_read_sts(perf->ntb) & inbits) < 3) in perf_msg_cmd_recv()
404 val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_CMD); in perf_msg_cmd_recv()
407 val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_LDATA); in perf_msg_cmd_recv()
410 val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_HDATA); in perf_msg_cmd_recv()
414 ntb_msg_clear_sts(perf->ntb, inbits); in perf_msg_cmd_recv()
416 dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data); in perf_msg_cmd_recv()
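
The message-register path frames commands the same way but over three NTB message registers; the receiver only consumes a frame once all three inbound status bits are set, which is the hweight64(...) < 3 check above. A rough receive-side sketch follows, assuming the CMD/LDATA/HDATA registers occupy message indices 0..2 (the driver's actual PERF_MSG_* values are not shown in the listing).

/* Receive-side sketch of the message-register command framing. */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/ntb.h>

/* Assumed message register indices. */
enum { DEMO_MSG_CMD = 0, DEMO_MSG_LDATA = 1, DEMO_MSG_HDATA = 2 };

static int demo_msg_cmd_recv(struct ntb_dev *ntb, int *pidx, u32 *cmd,
			     u64 *data)
{
	u64 inbits = ntb_msg_inbits(ntb);
	u32 val;

	/* A frame is complete only when all three inbound registers hold
	 * pending data; otherwise ask the caller to retry later. */
	if (hweight64(ntb_msg_read_sts(ntb) & inbits) < 3)
		return -ENODATA;

	/* ntb_msg_read() also reports which peer wrote the register. */
	*cmd = ntb_msg_read(ntb, pidx, DEMO_MSG_CMD);

	val = ntb_msg_read(ntb, pidx, DEMO_MSG_LDATA);
	*data = val;

	val = ntb_msg_read(ntb, pidx, DEMO_MSG_HDATA);
	*data |= (u64)val << 32;

	/* Acknowledge so the sender's outbound registers free up again. */
	return ntb_msg_clear_sts(ntb, inbits);
}
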
423 struct perf_ctx *perf = peer->perf; in perf_cmd_send() local
426 return perf->cmd_send(peer, cmd, data); in perf_cmd_send()
428 dev_err(&perf->ntb->dev, "Send invalid command\n"); in perf_cmd_send()
442 dev_err(&peer->perf->ntb->dev, "Exec invalid command\n"); in perf_cmd_exec()
449 dev_dbg(&peer->perf->ntb->dev, "CMD exec: %d\n", cmd); in perf_cmd_exec()
456 static int perf_cmd_recv(struct perf_ctx *perf) in perf_cmd_recv() argument
462 while (!(ret = perf->cmd_recv(perf, &pidx, &cmd, &data))) { in perf_cmd_recv()
463 peer = &perf->peers[pidx]; in perf_cmd_recv()
473 dev_err(&perf->ntb->dev, "Recv invalid command\n"); in perf_cmd_recv()
484 struct perf_ctx *perf = ctx; in perf_link_event() local
489 for (pidx = 0; pidx < perf->pcnt; pidx++) { in perf_link_event()
490 peer = &perf->peers[pidx]; in perf_link_event()
506 struct perf_ctx *perf = ctx; in perf_db_event() local
508 dev_dbg(&perf->ntb->dev, "DB vec %d mask %#llx bits %#llx\n", vec, in perf_db_event()
509 ntb_db_vector_mask(perf->ntb, vec), ntb_db_read(perf->ntb)); in perf_db_event()
512 (void)perf_cmd_recv(perf); in perf_db_event()
517 struct perf_ctx *perf = ctx; in perf_msg_event() local
519 dev_dbg(&perf->ntb->dev, "Msg status bits %#llx\n", in perf_msg_event()
520 ntb_msg_read_sts(perf->ntb)); in perf_msg_event()
523 (void)perf_cmd_recv(perf); in perf_msg_event()
534 (void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx); in perf_free_outbuf()
539 struct perf_ctx *perf = peer->perf; in perf_setup_outbuf() local
543 ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx, in perf_setup_outbuf()
546 dev_err(&perf->ntb->dev, "Failed to set outbuf translation\n"); in perf_setup_outbuf()
562 (void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx); in perf_free_inbuf()
563 dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size, in perf_free_inbuf()
571 struct perf_ctx *perf = peer->perf; in perf_setup_inbuf() local
575 ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx, in perf_setup_inbuf()
578 dev_err(&perf->ntb->dev, "Couldn't get inbuf restrictions\n"); in perf_setup_inbuf()
583 dev_err(&perf->ntb->dev, "Too big inbuf size %pa > %pa\n", in perf_setup_inbuf()
592 peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev, in perf_setup_inbuf()
596 dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n", in perf_setup_inbuf()
602 dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n"); in perf_setup_inbuf()
606 ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx, in perf_setup_inbuf()
609 dev_err(&perf->ntb->dev, "Failed to set inbuf translation\n"); in perf_setup_inbuf()
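
The inbuf setup above shows the usual inbound memory-window recipe: query the hardware alignment constraints, allocate coherent DMA memory that satisfies them, then program the inbound translation so peer writes land in that buffer. A condensed sketch, with the peer/window indices collapsed into plain parameters (the driver mixes perf->gidx and peer->gidx here, which this sketch elides):

/* Sketch of inbound memory-window setup (simplified index handling). */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/ntb.h>

static int demo_setup_inbuf(struct ntb_dev *ntb, int pidx, int widx,
			    resource_size_t size, void **buf, dma_addr_t *xlat)
{
	resource_size_t xlat_align, size_align, size_max;
	int ret;

	/* The hardware dictates the translation alignment, the size
	 * granularity and the maximum window size. */
	ret = ntb_mw_get_align(ntb, pidx, widx, &xlat_align, &size_align,
			       &size_max);
	if (ret)
		return ret;
	if (size > size_max)
		return -EINVAL;
	size = round_up(size, size_align);

	/* Coherent DMA memory doubles as the window backing store. */
	*buf = dma_alloc_coherent(&ntb->pdev->dev, size, xlat, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;
	if (!IS_ALIGNED(*xlat, xlat_align)) {
		dma_free_coherent(&ntb->pdev->dev, size, *buf, *xlat);
		return -EINVAL;
	}

	/* Point the inbound translation at the freshly allocated buffer. */
	ret = ntb_mw_set_trans(ntb, pidx, widx, *xlat, size);
	if (ret)
		dma_free_coherent(&ntb->pdev->dev, size, *buf, *xlat);

	return ret;
}
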
647 if (test_bit(0, &peer->perf->busy_flag) && in perf_service_work()
648 peer == peer->perf->test_peer) { in perf_service_work()
649 dev_warn(&peer->perf->ntb->dev, in perf_service_work()
651 perf_terminate_test(peer->perf); in perf_service_work()
658 static int perf_init_service(struct perf_ctx *perf) in perf_init_service() argument
662 if (ntb_peer_mw_count(perf->ntb) < perf->pcnt) { in perf_init_service()
663 dev_err(&perf->ntb->dev, "Not enough memory windows\n"); in perf_init_service()
667 if (ntb_msg_count(perf->ntb) >= PERF_MSG_CNT) { in perf_init_service()
668 perf->cmd_send = perf_msg_cmd_send; in perf_init_service()
669 perf->cmd_recv = perf_msg_cmd_recv; in perf_init_service()
671 dev_dbg(&perf->ntb->dev, "Message service initialized\n"); in perf_init_service()
676 dev_dbg(&perf->ntb->dev, "Message service unsupported\n"); in perf_init_service()
678 mask = GENMASK_ULL(perf->pcnt, 0); in perf_init_service()
679 if (ntb_spad_count(perf->ntb) >= PERF_SPAD_CNT(perf->pcnt) && in perf_init_service()
680 (ntb_db_valid_mask(perf->ntb) & mask) == mask) { in perf_init_service()
681 perf->cmd_send = perf_spad_cmd_send; in perf_init_service()
682 perf->cmd_recv = perf_spad_cmd_recv; in perf_init_service()
684 dev_dbg(&perf->ntb->dev, "Scratchpad service initialized\n"); in perf_init_service()
689 dev_dbg(&perf->ntb->dev, "Scratchpad service unsupported\n"); in perf_init_service()
691 dev_err(&perf->ntb->dev, "Command services unsupported\n"); in perf_init_service()
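
perf_init_service() above selects the command transport at probe time: message registers are preferred when the device exposes enough of them, scratchpads plus one doorbell bit per peer are the fallback, and initialization fails if neither is available. A condensed sketch of that decision, with the callbacks reduced to a hypothetical demo_ctx:

/* Sketch of the command-transport selection fallback. */
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/ntb.h>

#define DEMO_MSG_CNT		3			/* stand-in for PERF_MSG_CNT */
#define DEMO_SPAD_CNT(pcnt)	(3 * ((pcnt) + 1))	/* assumed, per the slot layout above */

struct demo_ctx {
	struct ntb_dev *ntb;
	int pcnt;			/* peer port count */
	int (*cmd_send)(void);		/* simplified callback slots */
	int (*cmd_recv)(void);
};

static int demo_msg_send(void)  { return 0; }
static int demo_msg_recv(void)  { return 0; }
static int demo_spad_send(void) { return 0; }
static int demo_spad_recv(void) { return 0; }

static int demo_init_service(struct demo_ctx *ctx)
{
	u64 mask;

	/* The driver additionally requires one peer memory window per peer
	 * (ntb_peer_mw_count() >= pcnt); that check is omitted here. */

	/* Preferred transport: dedicated message registers. */
	if (ntb_msg_count(ctx->ntb) >= DEMO_MSG_CNT) {
		ctx->cmd_send = demo_msg_send;
		ctx->cmd_recv = demo_msg_recv;
		return 0;
	}

	/* Fallback: scratchpads, which also need a doorbell bit per peer
	 * (plus one for ourselves) to signal that a command was posted. */
	mask = GENMASK_ULL(ctx->pcnt, 0);
	if (ntb_spad_count(ctx->ntb) >= DEMO_SPAD_CNT(ctx->pcnt) &&
	    (ntb_db_valid_mask(ctx->ntb) & mask) == mask) {
		ctx->cmd_send = demo_spad_send;
		ctx->cmd_recv = demo_spad_recv;
		return 0;
	}

	/* Neither transport is usable on this device. */
	return -EINVAL;
}
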
696 static int perf_enable_service(struct perf_ctx *perf) in perf_enable_service() argument
701 mask = ntb_db_valid_mask(perf->ntb); in perf_enable_service()
702 (void)ntb_db_set_mask(perf->ntb, mask); in perf_enable_service()
704 ret = ntb_set_ctx(perf->ntb, perf, &perf_ops); in perf_enable_service()
708 if (perf->cmd_send == perf_msg_cmd_send) { in perf_enable_service()
711 inbits = ntb_msg_inbits(perf->ntb); in perf_enable_service()
712 outbits = ntb_msg_outbits(perf->ntb); in perf_enable_service()
713 (void)ntb_msg_set_mask(perf->ntb, inbits | outbits); in perf_enable_service()
716 ret = ntb_msg_clear_mask(perf->ntb, incmd_bit); in perf_enable_service()
718 dev_dbg(&perf->ntb->dev, "MSG sts unmasked %#llx\n", incmd_bit); in perf_enable_service()
720 scnt = ntb_spad_count(perf->ntb); in perf_enable_service()
722 ntb_spad_write(perf->ntb, sidx, PERF_CMD_INVAL); in perf_enable_service()
723 incmd_bit = PERF_SPAD_NOTIFY(perf->gidx); in perf_enable_service()
724 ret = ntb_db_clear_mask(perf->ntb, incmd_bit); in perf_enable_service()
726 dev_dbg(&perf->ntb->dev, "DB bits unmasked %#llx\n", incmd_bit); in perf_enable_service()
729 ntb_clear_ctx(perf->ntb); in perf_enable_service()
733 ntb_link_enable(perf->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); in perf_enable_service()
735 ntb_link_event(perf->ntb); in perf_enable_service()
740 static void perf_disable_service(struct perf_ctx *perf) in perf_disable_service() argument
744 if (perf->cmd_send == perf_msg_cmd_send) { in perf_disable_service()
747 inbits = ntb_msg_inbits(perf->ntb); in perf_disable_service()
748 (void)ntb_msg_set_mask(perf->ntb, inbits); in perf_disable_service()
750 (void)ntb_db_set_mask(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx)); in perf_disable_service()
753 ntb_clear_ctx(perf->ntb); in perf_disable_service()
755 for (pidx = 0; pidx < perf->pcnt; pidx++) in perf_disable_service()
756 perf_cmd_exec(&perf->peers[pidx], PERF_CMD_CLEAR); in perf_disable_service()
758 for (pidx = 0; pidx < perf->pcnt; pidx++) in perf_disable_service()
759 flush_work(&perf->peers[pidx].service); in perf_disable_service()
761 for (pidx = 0; pidx < perf->pcnt; pidx++) { in perf_disable_service()
762 struct perf_peer *peer = &perf->peers[pidx]; in perf_disable_service()
764 ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0); in perf_disable_service()
767 ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx)); in perf_disable_service()
769 ntb_link_disable(perf->ntb); in perf_disable_service()
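
perf_enable_service() and perf_disable_service() above bracket the client lifetime: everything is masked before the context callbacks are installed, only the one notification source the chosen transport needs is unmasked, and then the link is enabled with a synthetic link event to replay the current state; teardown reverses the order. A trimmed sketch of the enable path for the scratchpad/doorbell case, with the notify-bit macro assumed as before:

/* Sketch of the doorbell-based service enable sequence. */
#include <linux/bits.h>
#include <linux/ntb.h>

static int demo_enable_service(struct ntb_dev *ntb, void *ctx,
			       const struct ntb_ctx_ops *ops, int our_gidx)
{
	u64 incmd_bit = BIT_ULL(our_gidx);	/* assumed PERF_SPAD_NOTIFY() */
	int ret;

	/* Mask every doorbell before installing callbacks so no stale
	 * interrupt fires into a half-initialized context. */
	(void)ntb_db_set_mask(ntb, ntb_db_valid_mask(ntb));

	ret = ntb_set_ctx(ntb, ctx, ops);
	if (ret)
		return ret;

	/* Unmask only the bit peers use to signal a posted command. */
	ret = ntb_db_clear_mask(ntb, incmd_bit);
	if (ret) {
		ntb_clear_ctx(ntb);
		return ret;
	}

	/* Bring the link up and replay the current link state through the
	 * freshly installed event handlers. */
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	return 0;
}
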
792 struct perf_peer *peer = pthr->perf->test_peer; in perf_copy_chunk()
853 return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR; in perf_copy_chunk()
863 struct perf_ctx *perf = data; in perf_dma_filter() local
866 node = dev_to_node(&perf->ntb->dev); in perf_dma_filter()
873 struct perf_ctx *perf = pthr->perf; in perf_init_test() local
875 struct perf_peer *peer = pthr->perf->test_peer; in perf_init_test()
877 pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL, in perf_init_test()
878 dev_to_node(&perf->ntb->dev)); in perf_init_test()
882 get_random_bytes(pthr->src, perf->test_peer->outbuf_size); in perf_init_test()
889 pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf); in perf_init_test()
891 dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n", in perf_init_test()
916 atomic_dec(&perf->tsync); in perf_init_test()
917 wake_up(&perf->twait); in perf_init_test()
924 struct perf_peer *peer = pthr->perf->test_peer; in perf_run_test()
925 struct perf_ctx *perf = pthr->perf; in perf_run_test() local
945 dev_err(&perf->ntb->dev, "%d: Got error %d on test\n", in perf_run_test()
968 struct perf_ctx *perf = pthr->perf; in perf_sync_test() local
975 atomic_read(&perf->tsync) < 0)); in perf_sync_test()
977 if (atomic_read(&perf->tsync) < 0) in perf_sync_test()
983 dev_dbg(&perf->ntb->dev, "%d: copied %llu bytes\n", in perf_sync_test()
986 dev_dbg(&perf->ntb->dev, "%d: lasted %llu usecs\n", in perf_sync_test()
989 dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx, in perf_sync_test()
997 struct perf_ctx *perf = pthr->perf; in perf_clear_test() local
1007 if (pthr->perf->test_peer->dma_dst_addr) in perf_clear_test()
1009 pthr->perf->test_peer->dma_dst_addr, in perf_clear_test()
1010 pthr->perf->test_peer->outbuf_size, in perf_clear_test()
1016 atomic_dec(&perf->tsync); in perf_clear_test()
1017 wake_up(&perf->twait); in perf_clear_test()
1051 static int perf_set_tcnt(struct perf_ctx *perf, u8 tcnt) in perf_set_tcnt() argument
1056 if (test_and_set_bit_lock(0, &perf->busy_flag)) in perf_set_tcnt()
1059 perf->tcnt = tcnt; in perf_set_tcnt()
1061 clear_bit_unlock(0, &perf->busy_flag); in perf_set_tcnt()
1066 static void perf_terminate_test(struct perf_ctx *perf) in perf_terminate_test() argument
1070 atomic_set(&perf->tsync, -1); in perf_terminate_test()
1071 wake_up(&perf->twait); in perf_terminate_test()
1074 wake_up(&perf->threads[tidx].dma_wait); in perf_terminate_test()
1075 cancel_work_sync(&perf->threads[tidx].work); in perf_terminate_test()
1081 struct perf_ctx *perf = peer->perf; in perf_submit_test() local
1089 if (test_and_set_bit_lock(0, &perf->busy_flag)) in perf_submit_test()
1092 perf->test_peer = peer; in perf_submit_test()
1093 atomic_set(&perf->tsync, perf->tcnt); in perf_submit_test()
1096 pthr = &perf->threads[tidx]; in perf_submit_test()
1101 if (tidx < perf->tcnt) in perf_submit_test()
1105 ret = wait_event_interruptible(perf->twait, in perf_submit_test()
1106 atomic_read(&perf->tsync) <= 0); in perf_submit_test()
1108 perf_terminate_test(perf); in perf_submit_test()
1112 clear_bit_unlock(0, &perf->busy_flag); in perf_submit_test()
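
perf_submit_test() above serializes runs with a busy bit, fans the work out to per-thread work items, and then sleeps until every worker has decremented the tsync counter (a negative value, or an interrupted wait, aborts the run via perf_terminate_test()). A reduced sketch of that lifecycle, with the per-thread state trimmed to a work item array and the shared counters:

/* Sketch of the test-run lifecycle (heavily reduced). */
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define DEMO_THREADS_MAX 8	/* stand-in for the driver's thread limit */

struct demo_test {
	unsigned long busy_flag;	/* bit 0: a test is in flight */
	atomic_t tsync;			/* workers still running; < 0 = abort */
	wait_queue_head_t twait;
	u8 tcnt;			/* requested worker count */
	struct work_struct work[DEMO_THREADS_MAX];	/* INIT_WORK()'ed at init */
};

static void demo_terminate(struct demo_test *t)
{
	int i;

	/* Tell running workers to bail out, then wait for them to finish. */
	atomic_set(&t->tsync, -1);
	wake_up(&t->twait);
	for (i = 0; i < DEMO_THREADS_MAX; i++)
		cancel_work_sync(&t->work[i]);
}

static int demo_submit(struct demo_test *t)
{
	int i, ret;

	/* One test at a time; concurrent debugfs writers get -EBUSY. */
	if (test_and_set_bit_lock(0, &t->busy_flag))
		return -EBUSY;

	atomic_set(&t->tsync, t->tcnt);
	for (i = 0; i < t->tcnt; i++)
		(void)queue_work(system_unbound_wq, &t->work[i]);

	/* Each worker decrements tsync when done; zero means success,
	 * negative means the run was aborted. */
	ret = wait_event_interruptible(t->twait,
				       atomic_read(&t->tsync) <= 0);
	if (ret)
		demo_terminate(t);

	clear_bit_unlock(0, &t->busy_flag);
	return ret;
}
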
1117 static int perf_read_stats(struct perf_ctx *perf, char *buf, in perf_read_stats() argument
1123 if (test_and_set_bit_lock(0, &perf->busy_flag)) in perf_read_stats()
1127 " Peer %d test statistics:\n", perf->test_peer->pidx); in perf_read_stats()
1130 pthr = &perf->threads[tidx]; in perf_read_stats()
1147 clear_bit_unlock(0, &perf->busy_flag); in perf_read_stats()
1152 static void perf_init_threads(struct perf_ctx *perf) in perf_init_threads() argument
1157 perf->tcnt = DEF_THREADS_CNT; in perf_init_threads()
1158 perf->test_peer = &perf->peers[0]; in perf_init_threads()
1159 init_waitqueue_head(&perf->twait); in perf_init_threads()
1162 pthr = &perf->threads[tidx]; in perf_init_threads()
1164 pthr->perf = perf; in perf_init_threads()
1172 static void perf_clear_threads(struct perf_ctx *perf) in perf_clear_threads() argument
1174 perf_terminate_test(perf); in perf_clear_threads()
1185 struct perf_ctx *perf = filep->private_data; in perf_dbgfs_read_info() local
1202 "Local port %d, Global index %d\n", ntb_port_number(perf->ntb), in perf_dbgfs_read_info()
1203 perf->gidx); in perf_dbgfs_read_info()
1205 if (test_bit(0, &perf->busy_flag)) { in perf_dbgfs_read_info()
1208 ntb_peer_port_number(perf->ntb, perf->test_peer->pidx), in perf_dbgfs_read_info()
1209 perf->test_peer->pidx); in perf_dbgfs_read_info()
1214 for (pidx = 0; pidx < perf->pcnt; pidx++) { in perf_dbgfs_read_info()
1215 peer = &perf->peers[pidx]; in perf_dbgfs_read_info()
1219 ntb_peer_port_number(perf->ntb, peer->pidx), peer->pidx, in perf_dbgfs_read_info()
1268 struct perf_ctx *perf = filep->private_data; in perf_dbgfs_read_run() local
1276 ret = perf_read_stats(perf, buf, PERF_BUF_LEN, &pos); in perf_dbgfs_read_run()
1290 struct perf_ctx *perf = filep->private_data; in perf_dbgfs_write_run() local
1298 if (pidx < 0 || pidx >= perf->pcnt) in perf_dbgfs_write_run()
1301 peer = &perf->peers[pidx]; in perf_dbgfs_write_run()
1319 struct perf_ctx *perf = filep->private_data; in perf_dbgfs_read_tcnt() local
1323 pos = scnprintf(buf, sizeof(buf), "%hhu\n", perf->tcnt); in perf_dbgfs_read_tcnt()
1332 struct perf_ctx *perf = filep->private_data; in perf_dbgfs_write_tcnt() local
1340 ret = perf_set_tcnt(perf, val); in perf_dbgfs_write_tcnt()
1353 static void perf_setup_dbgfs(struct perf_ctx *perf) in perf_setup_dbgfs() argument
1355 struct pci_dev *pdev = perf->ntb->pdev; in perf_setup_dbgfs()
1357 perf->dbgfs_dir = debugfs_create_dir(pci_name(pdev), perf_dbgfs_topdir); in perf_setup_dbgfs()
1358 if (IS_ERR(perf->dbgfs_dir)) { in perf_setup_dbgfs()
1359 dev_warn(&perf->ntb->dev, "DebugFS unsupported\n"); in perf_setup_dbgfs()
1363 debugfs_create_file("info", 0600, perf->dbgfs_dir, perf, in perf_setup_dbgfs()
1366 debugfs_create_file("run", 0600, perf->dbgfs_dir, perf, in perf_setup_dbgfs()
1369 debugfs_create_file("threads_count", 0600, perf->dbgfs_dir, perf, in perf_setup_dbgfs()
1373 debugfs_create_u8("chunk_order", 0500, perf->dbgfs_dir, &chunk_order); in perf_setup_dbgfs()
1375 debugfs_create_u8("total_order", 0500, perf->dbgfs_dir, &total_order); in perf_setup_dbgfs()
1377 debugfs_create_bool("use_dma", 0500, perf->dbgfs_dir, &use_dma); in perf_setup_dbgfs()
1380 static void perf_clear_dbgfs(struct perf_ctx *perf) in perf_clear_dbgfs() argument
1382 debugfs_remove_recursive(perf->dbgfs_dir); in perf_clear_dbgfs()
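
The debugfs files created above form the user-facing interface of the test: "threads_count" selects how many workers run, a peer index written to "run" starts a measurement against that peer, and reading "run" afterwards returns the per-thread statistics (per perf_submit_test() above, the write only returns once the run completes). A small userspace example in C; the directory path is a typical layout and must be replaced with the actual device directory on the target system.

/* Userspace sketch driving the ntb_perf debugfs files listed above. */
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical path: substitute the real PCI device name found under the
 * driver's debugfs directory. */
#define DBG_DIR "/sys/kernel/debug/ntb_perf/0000:01:00.0"

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	char path[256], line[256];
	FILE *f;

	/* Use four worker threads for the measurement. */
	snprintf(path, sizeof(path), "%s/threads_count", DBG_DIR);
	if (write_str(path, "4"))
		perror("threads_count");

	/* Start a test against peer index 0; the write blocks until the
	 * run finishes or is interrupted. */
	snprintf(path, sizeof(path), "%s/run", DBG_DIR);
	if (write_str(path, "0")) {
		perror("run");
		return EXIT_FAILURE;
	}

	/* Read back the per-thread statistics. */
	f = fopen(path, "r");
	if (!f) {
		perror("stats");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	return EXIT_SUCCESS;
}
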
1392 struct perf_ctx *perf; in perf_create_data() local
1394 perf = devm_kzalloc(&ntb->dev, sizeof(*perf), GFP_KERNEL); in perf_create_data()
1395 if (!perf) in perf_create_data()
1398 perf->pcnt = ntb_peer_port_count(ntb); in perf_create_data()
1399 perf->peers = devm_kcalloc(&ntb->dev, perf->pcnt, sizeof(*perf->peers), in perf_create_data()
1401 if (!perf->peers) in perf_create_data()
1404 perf->ntb = ntb; in perf_create_data()
1406 return perf; in perf_create_data()
1411 struct perf_ctx *perf = peer->perf; in perf_setup_peer_mw() local
1416 ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr, in perf_setup_peer_mw()
1421 peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr, in perf_setup_peer_mw()
1430 dev_warn(&peer->perf->ntb->dev, in perf_setup_peer_mw()
1438 static int perf_init_peers(struct perf_ctx *perf) in perf_init_peers() argument
1443 lport = ntb_port_number(perf->ntb); in perf_init_peers()
1444 perf->gidx = -1; in perf_init_peers()
1445 for (pidx = 0; pidx < perf->pcnt; pidx++) { in perf_init_peers()
1446 peer = &perf->peers[pidx]; in perf_init_peers()
1448 peer->perf = perf; in perf_init_peers()
1450 if (lport < ntb_peer_port_number(perf->ntb, pidx)) { in perf_init_peers()
1451 if (perf->gidx == -1) in perf_init_peers()
1452 perf->gidx = pidx; in perf_init_peers()
1460 if (perf->gidx == -1) in perf_init_peers()
1461 perf->gidx = pidx; in perf_init_peers()
1467 if (perf->pcnt == 1 && ntb_port_number(perf->ntb) == 0 && in perf_init_peers()
1468 ntb_peer_port_number(perf->ntb, 0) == 0) { in perf_init_peers()
1469 perf->gidx = 0; in perf_init_peers()
1470 perf->peers[0].gidx = 0; in perf_init_peers()
1473 for (pidx = 0; pidx < perf->pcnt; pidx++) { in perf_init_peers()
1474 ret = perf_setup_peer_mw(&perf->peers[pidx]); in perf_init_peers()
1479 dev_dbg(&perf->ntb->dev, "Global port index %d\n", perf->gidx); in perf_init_peers()
1486 struct perf_ctx *perf; in perf_probe() local
1489 perf = perf_create_data(ntb); in perf_probe()
1490 if (IS_ERR(perf)) in perf_probe()
1491 return PTR_ERR(perf); in perf_probe()
1493 ret = perf_init_peers(perf); in perf_probe()
1497 perf_init_threads(perf); in perf_probe()
1499 ret = perf_init_service(perf); in perf_probe()
1503 ret = perf_enable_service(perf); in perf_probe()
1507 perf_setup_dbgfs(perf); in perf_probe()
1514 struct perf_ctx *perf = ntb->ctx; in perf_remove() local
1516 perf_clear_dbgfs(perf); in perf_remove()
1518 perf_disable_service(perf); in perf_remove()
1520 perf_clear_threads(perf); in perf_remove()
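
Finally, perf_probe() and perf_remove() above pair setup and teardown in mirror order: allocate the context, resolve peers and the global index, initialize the worker threads, pick the command service, enable it (which brings the link up), and only then expose the debugfs interface; removal unwinds in reverse. A compressed sketch of that ordering; the demo_* helpers stand in for the driver's perf_* functions and are not real APIs.

/* Probe-ordering sketch; helper declarations are hypothetical stand-ins. */
#include <linux/err.h>
#include <linux/ntb.h>

struct demo_ctx;	/* opaque here; see the earlier sketches */

struct demo_ctx *demo_create_data(struct ntb_dev *ntb);
int demo_init_peers(struct demo_ctx *ctx);
void demo_init_threads(struct demo_ctx *ctx);
int demo_init_service(struct demo_ctx *ctx);
int demo_enable_service(struct demo_ctx *ctx);
void demo_setup_dbgfs(struct demo_ctx *ctx);

static int demo_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct demo_ctx *ctx;
	int ret;

	ctx = demo_create_data(ntb);	/* devm allocations, peer array */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = demo_init_peers(ctx);	/* ports, global index, memory windows */
	if (ret)
		return ret;

	demo_init_threads(ctx);		/* worker state and waitqueue */

	ret = demo_init_service(ctx);	/* message vs. scratchpad transport */
	if (ret)
		return ret;

	ret = demo_enable_service(ctx);	/* ctx ops, unmask, link enable */
	if (ret)
		return ret;

	demo_setup_dbgfs(ctx);		/* user interface comes up last */
	return 0;
}
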