Lines Matching refs:idxd (identifier cross-references for idxd in the idxd perfmon driver, drivers/dma/idxd/perfmon.c)

96 struct idxd_device *idxd = idxd_pmu->idxd; in perfmon_assign_hw_event() local
100 hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx)); in perfmon_assign_hw_event()
101 hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx)); in perfmon_assign_hw_event()
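For context, a sketch of the function these three matches fall in, reconstructed from the fragments above (not verbatim source; struct hw_perf_event comes from linux/perf_event.h). Note that the matched lines load both config_base and event_base from the same CNTRCFG register:

    static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu,
                                        struct perf_event *event, int idx)
    {
            struct idxd_device *idxd = idxd_pmu->idxd;
            struct hw_perf_event *hwc = &event->hw;

            hwc->idx = idx;
            /* both fields are seeded from the counter-config MMIO register */
            hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx));
            hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx));
    }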
170 struct idxd_device *idxd; in perfmon_pmu_event_init() local
173 idxd = event_to_idxd(event); in perfmon_pmu_event_init()
186 if (event->pmu != &idxd->idxd_pmu->pmu) in perfmon_pmu_event_init()
189 event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd)); in perfmon_pmu_event_init()
194 ret = perfmon_validate_group(idxd->idxd_pmu, event); in perfmon_pmu_event_init()
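A plausible reconstruction of the event_init path around these matches; the sanity checks that never reference idxd (event type, sampling, CPU) are elided with a comment:

    static int perfmon_pmu_event_init(struct perf_event *event)
    {
            struct idxd_device *idxd = event_to_idxd(event);
            int ret = 0;

            /* ... type/sampling/cpu validity checks elided ... */

            /* reject events aimed at some other device's PMU */
            if (event->pmu != &idxd->idxd_pmu->pmu)
                    return -EINVAL;

            event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));

            /* a lone event is its own leader; group members must
             * validate as a set against this PMU's counters */
            if (event->group_leader != event)
                    ret = perfmon_validate_group(idxd->idxd_pmu, event);

            return ret;
    }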
202 struct idxd_device *idxd; in perfmon_pmu_read_counter() local
205 idxd = event_to_idxd(event); in perfmon_pmu_read_counter()
207 return ioread64(CNTRDATA_REG(idxd, cntr)); in perfmon_pmu_read_counter()
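The read path is a single 64-bit MMIO read of the per-counter data register; assembled from the matches, with cntr being the hardware counter index stashed in event->hw.idx:

    static inline u64 perfmon_pmu_read_counter(struct perf_event *event)
    {
            struct idxd_device *idxd = event_to_idxd(event);
            int cntr = event->hw.idx;

            return ioread64(CNTRDATA_REG(idxd, cntr));
    }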
212 struct idxd_device *idxd = event_to_idxd(event); in perfmon_pmu_event_update() local
214 int shift = 64 - idxd->idxd_pmu->counter_width; in perfmon_pmu_event_update()
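The shift at line 214 is the heart of the update logic: the hardware counters are only counter_width bits wide, so shifting both snapshots up by 64 - counter_width and the difference back down makes the subtraction wrap correctly when a counter overflows. A sketch of the surrounding update, assuming the usual perf prev_count/cmpxchg idiom:

    static void perfmon_pmu_event_update(struct perf_event *event)
    {
            struct idxd_device *idxd = event_to_idxd(event);
            int shift = 64 - idxd->idxd_pmu->counter_width;
            struct hw_perf_event *hwc = &event->hw;
            u64 prev, now, delta;

            /* retry until prev_count is published without a race */
            do {
                    prev = local64_read(&hwc->prev_count);
                    now = perfmon_pmu_read_counter(event);
            } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

            /* discard bits above counter_width so wraparound cancels out */
            delta = ((now << shift) - (prev << shift)) >> shift;
            local64_add(delta, &event->count);
    }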
230 void perfmon_counter_overflow(struct idxd_device *idxd) in perfmon_counter_overflow() argument
236 n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE); in perfmon_counter_overflow()
238 ovfstatus = ioread32(OVFSTATUS_REG(idxd)); in perfmon_counter_overflow()
254 event = idxd->idxd_pmu->event_list[i]; in perfmon_counter_overflow()
258 iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd)); in perfmon_counter_overflow()
261 ovfstatus = ioread32(OVFSTATUS_REG(idxd)); in perfmon_counter_overflow()
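These matches come from the overflow handler's retry loop: counters can keep overflowing while earlier ones are serviced, so OVFSTATUS is re-read after each pass. A sketch, assuming OVERFLOW_SIZE bounds both the status bitmap and the retry count, as the min() at line 236 suggests:

    void perfmon_counter_overflow(struct idxd_device *idxd)
    {
            int i, n_counters, max_loop = OVERFLOW_SIZE;
            struct perf_event *event;
            unsigned long ovfstatus;

            n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);
            ovfstatus = ioread32(OVFSTATUS_REG(idxd));

            /* max_loop is a failsafe against a counter so narrow that
             * it overflows continually while being serviced */
            while (ovfstatus && max_loop--) {
                    for_each_set_bit(i, &ovfstatus, n_counters) {
                            unsigned long ovfstatus_clear = 0;

                            /* fold the wrapped value into event->count */
                            event = idxd->idxd_pmu->event_list[i];
                            perfmon_pmu_event_update(event);
                            /* writing 1 to an OVFSTATUS bit clears it */
                            set_bit(i, &ovfstatus_clear);
                            iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd));
                    }
                    ovfstatus = ioread32(OVFSTATUS_REG(idxd));
            }
    }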
271 static inline void perfmon_reset_config(struct idxd_device *idxd) in perfmon_reset_config() argument
273 iowrite32(CONFIG_RESET, PERFRST_REG(idxd)); in perfmon_reset_config()
274 iowrite32(0, OVFSTATUS_REG(idxd)); in perfmon_reset_config()
275 iowrite32(0, PERFFRZ_REG(idxd)); in perfmon_reset_config()
278 static inline void perfmon_reset_counters(struct idxd_device *idxd) in perfmon_reset_counters() argument
280 iowrite32(CNTR_RESET, PERFRST_REG(idxd)); in perfmon_reset_counters()
283 static inline void perfmon_reset(struct idxd_device *idxd) in perfmon_reset() argument
285 perfmon_reset_config(idxd); in perfmon_reset()
286 perfmon_reset_counters(idxd); in perfmon_reset()
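The three reset helpers are fully visible in the matches; reassembled here with comments on what each MMIO write does (the comment text is mine, not the driver's):

    static inline void perfmon_reset_config(struct idxd_device *idxd)
    {
            iowrite32(CONFIG_RESET, PERFRST_REG(idxd)); /* reset counter config */
            iowrite32(0, OVFSTATUS_REG(idxd));          /* clear stale overflow bits */
            iowrite32(0, PERFFRZ_REG(idxd));            /* release the global freeze */
    }

    static inline void perfmon_reset_counters(struct idxd_device *idxd)
    {
            iowrite32(CNTR_RESET, PERFRST_REG(idxd));   /* zero the counters */
    }

    static inline void perfmon_reset(struct idxd_device *idxd)
    {
            perfmon_reset_config(idxd);
            perfmon_reset_counters(idxd);
    }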
296 struct idxd_device *idxd; in perfmon_pmu_event_start() local
299 idxd = event_to_idxd(event); in perfmon_pmu_event_start()
317 if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters)) in perfmon_pmu_event_start()
318 iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ)); in perfmon_pmu_event_start()
319 if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters)) in perfmon_pmu_event_start()
320 iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC)); in perfmon_pmu_event_start()
321 if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters)) in perfmon_pmu_event_start()
322 iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ)); in perfmon_pmu_event_start()
323 if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters)) in perfmon_pmu_event_start()
324 iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ)); in perfmon_pmu_event_start()
325 if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters)) in perfmon_pmu_event_start()
326 iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG)); in perfmon_pmu_event_start()
329 cntrdata = ioread64(CNTRDATA_REG(idxd, cntr)); in perfmon_pmu_event_start()
338 iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr)); in perfmon_pmu_event_start()
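A condensed sketch of the start path these matches outline: program only the filters the hardware advertises, snapshot the counter as the baseline for deltas, then enable it. union filter_cfg, CNTRCFG_IRQ_OVERFLOW, and CNTRCFG_ENABLE are assumed to come from the driver's perfmon.h; the event category/encoding bits OR'd into cntr_cfg never reference idxd and are elided:

    static void perfmon_pmu_event_start(struct perf_event *event, int mode)
    {
            struct idxd_device *idxd = event_to_idxd(event);
            struct hw_perf_event *hwc = &event->hw;
            union filter_cfg flt_cfg;
            u64 cntr_cfg, cntrdata;
            int cntr = hwc->idx;

            /* filter selections arrive from user space in attr.config1 */
            flt_cfg.val = event->attr.config1;

            /* program each filter only if the hardware advertises it */
            if (flt_cfg.wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
                    iowrite32(flt_cfg.wq, FLTCFG_REG(idxd, cntr, FLT_WQ));
            /* ... FLT_TC, FLT_PG_SZ, FLT_XFER_SZ and FLT_ENG follow the
             * same pattern, per the matches at lines 319-326 ... */

            /* snapshot the counter as the baseline for later deltas */
            cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
            local64_set(&hwc->prev_count, cntrdata);

            /* the event category/encoding from attr.config is OR'd in
             * here as well (elided; it doesn't reference idxd) */
            cntr_cfg = CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE;
            iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
    }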
344 struct idxd_device *idxd; in perfmon_pmu_event_stop() local
348 idxd = event_to_idxd(event); in perfmon_pmu_event_stop()
351 for (i = 0; i < idxd->idxd_pmu->n_events; i++) { in perfmon_pmu_event_stop()
352 if (event != idxd->idxd_pmu->event_list[i]) in perfmon_pmu_event_stop()
355 for (++i; i < idxd->idxd_pmu->n_events; i++) in perfmon_pmu_event_stop()
356 idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i]; in perfmon_pmu_event_stop()
357 --idxd->idxd_pmu->n_events; in perfmon_pmu_event_stop()
361 cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr)); in perfmon_pmu_event_stop()
363 iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr)); in perfmon_pmu_event_stop()
369 clear_bit(cntr, idxd->idxd_pmu->used_mask); in perfmon_pmu_event_stop()
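Lines 351-357 are a delete-and-compact pass over the PMU's event_list, and lines 361-363 are a read-modify-write that clears the counter's enable bit. Put together (CNTRCFG_ENABLE assumed from perfmon.h):

    static void perfmon_pmu_event_stop(struct perf_event *event, int mode)
    {
            struct idxd_device *idxd = event_to_idxd(event);
            struct hw_perf_event *hwc = &event->hw;
            int i, cntr = hwc->idx;
            u64 cntr_cfg;

            /* find the event, then slide the tail of event_list over it */
            for (i = 0; i < idxd->idxd_pmu->n_events; i++) {
                    if (event != idxd->idxd_pmu->event_list[i])
                            continue;
                    for (++i; i < idxd->idxd_pmu->n_events; i++)
                            idxd->idxd_pmu->event_list[i - 1] =
                                    idxd->idxd_pmu->event_list[i];
                    --idxd->idxd_pmu->n_events;
                    break;
            }

            /* clear only the enable bit; leave the rest of CNTRCFG intact */
            cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr));
            cntr_cfg &= ~CNTRCFG_ENABLE;
            iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));

            /* free the hardware counter for reuse */
            clear_bit(cntr, idxd->idxd_pmu->used_mask);
    }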
379 struct idxd_device *idxd = event_to_idxd(event); in perfmon_pmu_event_add() local
380 struct idxd_pmu *idxd_pmu = idxd->idxd_pmu; in perfmon_pmu_event_add()
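Only two matched lines fall in the add path, but they anchor the counter-assignment sequence. A sketch, with perfmon_collect_events() and perfmon_assign_event() taking idxd_pmu rather than idxd (hence producing no matches here) and the hwc->state bookkeeping elided:

    static int perfmon_pmu_event_add(struct perf_event *event, int flags)
    {
            struct idxd_device *idxd = event_to_idxd(event);
            struct idxd_pmu *idxd_pmu = idxd->idxd_pmu;
            int idx, n;

            /* fold the event (and any group siblings) into event_list */
            n = perfmon_collect_events(idxd_pmu, event, false);
            if (n < 0)
                    return n;

            /* claim a free hardware counter, then bind the event to it */
            idx = perfmon_assign_event(idxd_pmu, event);
            if (idx < 0)
                    return idx;
            perfmon_assign_hw_event(idxd_pmu, event, idx);

            if (flags & PERF_EF_START)
                    perfmon_pmu_event_start(event, 0);

            idxd_pmu->n_events = n;
            return 0;
    }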
406 static void enable_perfmon_pmu(struct idxd_device *idxd) in enable_perfmon_pmu() argument
408 iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd)); in enable_perfmon_pmu()
411 static void disable_perfmon_pmu(struct idxd_device *idxd) in disable_perfmon_pmu() argument
413 iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd)); in disable_perfmon_pmu()
418 struct idxd_device *idxd = pmu_to_idxd(pmu); in perfmon_pmu_enable() local
420 enable_perfmon_pmu(idxd); in perfmon_pmu_enable()
425 struct idxd_device *idxd = pmu_to_idxd(pmu); in perfmon_pmu_disable() local
427 disable_perfmon_pmu(idxd); in perfmon_pmu_disable()
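Freeze/unfreeze is device-global, so the perf core's pmu_enable/pmu_disable callbacks map onto a single PERFFRZ register write each. A hypothetical excerpt of how the functions in this listing would be wired up during PMU setup; all of these struct pmu fields are standard perf core hooks:

    idxd_pmu->pmu.event_init  = perfmon_pmu_event_init;
    idxd_pmu->pmu.add         = perfmon_pmu_event_add;
    idxd_pmu->pmu.start       = perfmon_pmu_event_start;
    idxd_pmu->pmu.stop        = perfmon_pmu_event_stop;
    idxd_pmu->pmu.pmu_enable  = perfmon_pmu_enable;
    idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable;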
464 void perfmon_pmu_remove(struct idxd_device *idxd) in perfmon_pmu_remove() argument
466 if (!idxd->idxd_pmu) in perfmon_pmu_remove()
469 perf_pmu_unregister(&idxd->idxd_pmu->pmu); in perfmon_pmu_remove()
470 kfree(idxd->idxd_pmu); in perfmon_pmu_remove()
471 idxd->idxd_pmu = NULL; in perfmon_pmu_remove()
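Teardown as the matches show it: a guard for devices where init never ran or perfmon is unsupported, then unregister, free, and clear. A reassembled sketch (any teardown steps that don't reference idxd would be invisible in this listing):

    void perfmon_pmu_remove(struct idxd_device *idxd)
    {
            if (!idxd->idxd_pmu)
                    return;        /* init failed or perfmon unsupported */

            perf_pmu_unregister(&idxd->idxd_pmu->pmu);
            kfree(idxd->idxd_pmu);
            idxd->idxd_pmu = NULL; /* makes a repeat call harmless */
    }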
474 int perfmon_pmu_init(struct idxd_device *idxd) in perfmon_pmu_init() argument
484 if (idxd->perfmon_offset == 0) in perfmon_pmu_init()
491 idxd_pmu->idxd = idxd; in perfmon_pmu_init()
492 idxd->idxd_pmu = idxd_pmu; in perfmon_pmu_init()
494 if (idxd->data->type == IDXD_TYPE_DSA) { in perfmon_pmu_init()
495 rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id); in perfmon_pmu_init()
498 } else if (idxd->data->type == IDXD_TYPE_IAX) { in perfmon_pmu_init()
499 rc = sprintf(idxd_pmu->name, "iax%d", idxd->id); in perfmon_pmu_init()
506 perfmon_reset(idxd); in perfmon_pmu_init()
508 perfcap.bits = ioread64(PERFCAP_REG(idxd)); in perfmon_pmu_init()
559 idxd->idxd_pmu = NULL; in perfmon_pmu_init()
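The init matches outline the whole setup sequence: capability gate, allocation, naming, hardware reset, capability read, and (at line 559) the error-path cleanup. A sketch connecting them; union idxd_perfcap, IDXD_TYPE_*, and PERFCAP_REG are assumed from the driver headers, and capability parsing plus perf_pmu_register() are elided:

    int perfmon_pmu_init(struct idxd_device *idxd)
    {
            union idxd_perfcap perfcap;
            struct idxd_pmu *idxd_pmu;
            int rc = -ENODEV;

            /* perfmon_offset == 0 means the hardware lacks perfmon */
            if (idxd->perfmon_offset == 0)
                    return -ENODEV;

            idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL);
            if (!idxd_pmu)
                    return -ENOMEM;

            idxd_pmu->idxd = idxd;
            idxd->idxd_pmu = idxd_pmu;

            /* the PMU name mirrors the device node: dsa<N> or iax<N> */
            if (idxd->data->type == IDXD_TYPE_DSA)
                    rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id);
            else if (idxd->data->type == IDXD_TYPE_IAX)
                    rc = sprintf(idxd_pmu->name, "iax%d", idxd->id);
            else
                    goto free;
            if (rc < 0)
                    goto free;

            /* quiesce the hardware, then read its capabilities */
            perfmon_reset(idxd);
            perfcap.bits = ioread64(PERFCAP_REG(idxd));

            /* ... capability parsing and perf_pmu_register() elided ... */
            return 0;

    free:
            kfree(idxd_pmu);
            idxd->idxd_pmu = NULL; /* the cleanup matched at line 559 */
            return rc;
    }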