Lines matching refs: idxd

36 struct idxd_device *idxd = engine->idxd; in engine_group_id_store() local
45 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in engine_group_id_store()
48 if (id > idxd->max_groups - 1 || id < -1) in engine_group_id_store()
63 engine->group = idxd->groups[id]; in engine_group_id_store()
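
The fragments above (lines 36-63) show the store-side validation pattern used throughout this file: refuse writes while the device is not configurable, bounds-check the incoming id (with -1 meaning "detach from any group"), then bind the engine. A minimal sketch of that shape, using trimmed stand-in types and a hypothetical helper name rather than the real kernel definitions:

    /* Sketch only: stand-in types; the real definitions live in idxd.h. */
    struct idxd_group;
    struct idxd_device {
        unsigned long flags;
        int max_groups;
        struct idxd_group **groups;
    };
    struct idxd_engine {
        struct idxd_device *idxd;
        struct idxd_group *group;
    };

    static int engine_set_group(struct idxd_engine *engine, long id)
    {
        struct idxd_device *idxd = engine->idxd;

        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
            return -EPERM;   /* device configuration is locked out */
        if (id > idxd->max_groups - 1 || id < -1)
            return -EINVAL;  /* -1 is the only negative value allowed */
        engine->group = (id == -1) ? NULL : idxd->groups[id];
        return 0;
    }
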
102 static void idxd_set_free_rdbufs(struct idxd_device *idxd) in idxd_set_free_rdbufs() argument
106 for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) { in idxd_set_free_rdbufs()
107 struct idxd_group *g = idxd->groups[i]; in idxd_set_free_rdbufs()
112 idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs; in idxd_set_free_rdbufs()
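
Lines 108-111 are elided by the matcher, but the surrounding lines pin the helper down: it sums every group's reservation and publishes the remainder as the free count. A likely reconstruction (the per-group accumulation is inferred from the reserved-store handler below):

    static void idxd_set_free_rdbufs(struct idxd_device *idxd)
    {
        int i, rdbufs;

        for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
            struct idxd_group *g = idxd->groups[i];

            rdbufs += g->rdbufs_reserved;  /* inferred from lines 157/221 */
        }
        idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
    }
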
137 struct idxd_device *idxd = group->idxd; in group_read_buffers_reserved_store() local
145 if (idxd->data->type == IDXD_TYPE_IAX) in group_read_buffers_reserved_store()
148 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_read_buffers_reserved_store()
151 if (idxd->state == IDXD_DEV_ENABLED) in group_read_buffers_reserved_store()
154 if (val > idxd->max_rdbufs) in group_read_buffers_reserved_store()
157 if (val > idxd->nr_rdbufs + group->rdbufs_reserved) in group_read_buffers_reserved_store()
161 idxd_set_free_rdbufs(idxd); in group_read_buffers_reserved_store()
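
The two range checks at lines 154 and 157 encode a headroom rule: a group may grow its reservation by at most the device's current free count, because its own existing reservation is conceptually returned to the pool first. For example, with max_rdbufs = 8, nr_rdbufs = 5 free, and this group holding 2, any value up to 7 passes. Distilled into a hypothetical predicate:

    static bool rdbufs_reservation_ok(struct idxd_device *idxd,
                                      struct idxd_group *group,
                                      unsigned long val)
    {
        if (val > idxd->max_rdbufs)
            return false;
        return val <= idxd->nr_rdbufs + group->rdbufs_reserved;
    }
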
203 struct idxd_device *idxd = group->idxd; in group_read_buffers_allowed_store() local
211 if (idxd->data->type == IDXD_TYPE_IAX) in group_read_buffers_allowed_store()
214 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_read_buffers_allowed_store()
217 if (idxd->state == IDXD_DEV_ENABLED) in group_read_buffers_allowed_store()
221 val > group->rdbufs_reserved + idxd->nr_rdbufs) in group_read_buffers_allowed_store()
266 struct idxd_device *idxd = group->idxd; in group_use_read_buffer_limit_store() local
274 if (idxd->data->type == IDXD_TYPE_IAX) in group_use_read_buffer_limit_store()
277 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_use_read_buffer_limit_store()
280 if (idxd->state == IDXD_DEV_ENABLED) in group_use_read_buffer_limit_store()
283 if (idxd->rdbuf_limit == 0) in group_use_read_buffer_limit_store()
311 struct idxd_device *idxd = group->idxd; in group_engines_show() local
313 for (i = 0; i < idxd->max_engines; i++) { in group_engines_show()
314 struct idxd_engine *engine = idxd->engines[i]; in group_engines_show()
320 rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id); in group_engines_show()
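
The show side appends one "engine<dev>.<id>" token per matching engine with sysfs_emit_at(), carrying the running buffer offset in rc. A sketch of that loop; the group filter and the trailing-space-to-newline trim are assumptions about the elided lines:

    static ssize_t group_engines_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
    {
        struct idxd_group *group = confdev_to_group(dev);
        struct idxd_device *idxd = group->idxd;
        int i, rc = 0;

        for (i = 0; i < idxd->max_engines; i++) {
            struct idxd_engine *engine = idxd->engines[i];

            if (!engine->group || engine->group->id != group->id)
                continue;  /* assumed: only engines bound to this group */
            rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
        }
        if (rc) {
            rc--;  /* assumed: overwrite the trailing space */
            rc += sysfs_emit_at(buf, rc, "\n");
        }
        return rc;
    }
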
339 struct idxd_device *idxd = group->idxd; in group_work_queues_show() local
341 for (i = 0; i < idxd->max_wqs; i++) { in group_work_queues_show()
342 struct idxd_wq *wq = idxd->wqs[i]; in group_work_queues_show()
348 rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id); in group_work_queues_show()
376 struct idxd_device *idxd = group->idxd; in group_traffic_class_a_store() local
384 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_traffic_class_a_store()
387 if (idxd->state == IDXD_DEV_ENABLED) in group_traffic_class_a_store()
390 if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) in group_traffic_class_a_store()
418 struct idxd_device *idxd = group->idxd; in group_traffic_class_b_store() local
426 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in group_traffic_class_b_store()
429 if (idxd->state == IDXD_DEV_ENABLED) in group_traffic_class_b_store()
432 if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) in group_traffic_class_b_store()
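
Lines 390 and 432 gate both traffic-class stores the same way: on hardware at or below DEVICE_VERSION_2 the traffic classes are fixed unless the driver's tc_override module parameter was set, so the store bails out early:

    if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
        return -EPERM;  /* assumed errno; TCs are fixed on older hardware */
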
524 struct idxd_device *idxd) in idxd_group_attr_progress_limit_invisible() argument
528 !idxd->hw.group_cap.progress_limit; in idxd_group_attr_progress_limit_invisible()
532 struct idxd_device *idxd) in idxd_group_attr_read_buffers_invisible() argument
544 idxd->data->type == IDXD_TYPE_IAX; in idxd_group_attr_read_buffers_invisible()
552 struct idxd_device *idxd = group->idxd; in idxd_group_attr_visible() local
554 if (idxd_group_attr_progress_limit_invisible(attr, idxd)) in idxd_group_attr_visible()
557 if (idxd_group_attr_read_buffers_invisible(attr, idxd)) in idxd_group_attr_visible()
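
The *_invisible() helpers at lines 524-557 feed the attribute group's .is_visible hook: returning 0 hides the sysfs file entirely on hardware that cannot back it (no progress-limit capability, or read-buffer attributes on an IAX part). A sketch of the hook, close to the shape the fragments imply:

    static umode_t idxd_group_attr_visible(struct kobject *kobj,
                                           struct attribute *attr, int n)
    {
        struct device *dev = kobj_to_dev(kobj);
        struct idxd_group *group = confdev_to_group(dev);
        struct idxd_device *idxd = group->idxd;

        if (idxd_group_attr_progress_limit_invisible(attr, idxd))
            return 0;        /* hide: capability absent */
        if (idxd_group_attr_read_buffers_invisible(attr, idxd))
            return 0;        /* hide: IAX has no read buffers */
        return attr->mode;   /* otherwise keep the default mode */
    }
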
632 struct idxd_device *idxd = wq->idxd; in wq_group_id_store() local
641 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_group_id_store()
647 if (id > idxd->max_groups - 1 || id < -1) in wq_group_id_store()
658 group = idxd->groups[id]; in wq_group_id_store()
684 struct idxd_device *idxd = wq->idxd; in wq_mode_store() local
686 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_mode_store()
715 static int total_claimed_wq_size(struct idxd_device *idxd) in total_claimed_wq_size() argument
720 for (i = 0; i < idxd->max_wqs; i++) { in total_claimed_wq_size()
721 struct idxd_wq *wq = idxd->wqs[i]; in total_claimed_wq_size()
735 struct idxd_device *idxd = wq->idxd; in wq_size_store() local
742 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_size_store()
745 if (idxd->state == IDXD_DEV_ENABLED) in wq_size_store()
748 if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) in wq_size_store()
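
total_claimed_wq_size() sums every queue's current allocation, and the check at line 748 lets a queue be resized as long as the new total fits once its old size is handed back: with max_wq_size = 128 and 120 entries claimed (32 of them by this queue), the queue may grow to 40. The accounting, with a hypothetical predicate for the line-748 check:

    static int total_claimed_wq_size(struct idxd_device *idxd)
    {
        int i, wq_size = 0;

        for (i = 0; i < idxd->max_wqs; i++)
            wq_size += idxd->wqs[i]->size;
        return wq_size;
    }

    static bool wq_resize_fits(struct idxd_wq *wq, unsigned long size)
    {
        struct idxd_device *idxd = wq->idxd;

        return size + total_claimed_wq_size(idxd) - wq->size
                <= idxd->max_wq_size;
    }
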
772 struct idxd_device *idxd = wq->idxd; in wq_priority_store() local
779 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_priority_store()
808 struct idxd_device *idxd = wq->idxd; in wq_block_on_fault_store() local
812 if (!idxd->hw.gen_cap.block_on_fault) in wq_block_on_fault_store()
815 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_block_on_fault_store()
854 struct idxd_device *idxd = wq->idxd; in wq_threshold_store() local
865 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_threshold_store()
1011 struct idxd_device *idxd = wq->idxd; in wq_max_transfer_size_store() local
1015 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_max_transfer_size_store()
1025 if (xfer_size > idxd->max_xfer_bytes) in wq_max_transfer_size_store()
1048 struct idxd_device *idxd = wq->idxd; in wq_max_batch_size_store() local
1052 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_max_batch_size_store()
1062 if (batch_size > idxd->max_batch_size) in wq_max_batch_size_store()
1065 idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size); in wq_max_batch_size_store()
1084 struct idxd_device *idxd = wq->idxd; in wq_ats_disable_store() local
1091 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_ats_disable_store()
1120 struct idxd_device *idxd = wq->idxd; in wq_prs_disable_store() local
1127 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in wq_prs_disable_store()
1150 struct idxd_device *idxd = wq->idxd; in wq_occupancy_show() local
1153 if (!idxd->hw.wq_cap.occupancy) in wq_occupancy_show()
1156 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX); in wq_occupancy_show()
1157 occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK; in wq_occupancy_show()
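
wq_occupancy_show() is the one read path here that touches hardware: it computes the per-queue config-space offset with WQCFG_OFFSET(), reads the register through the mapped BAR, and masks out the occupancy field. The fragments nearly pin the whole function down; the errno and the final emit are assumptions:

    static ssize_t wq_occupancy_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
    {
        struct idxd_wq *wq = confdev_to_wq(dev);
        struct idxd_device *idxd = wq->idxd;
        u32 occup, offset;

        if (!idxd->hw.wq_cap.occupancy)
            return -EOPNOTSUPP;  /* assumed errno when unsupported */

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
        occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

        return sysfs_emit(buf, "%u\n", occup);  /* assumed format */
    }
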
1231 static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask) in idxd_verify_supported_opcap() argument
1242 if (!test_bit(bit, idxd->opcap_bmap)) in idxd_verify_supported_opcap()
1253 struct idxd_device *idxd = wq->idxd; in wq_op_config_store() local
1268 rc = idxd_verify_supported_opcap(idxd, opmask); in wq_op_config_store()
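
The opcap verifier walks only the bits the user actually set and rejects the whole mask if any of them is missing from the device's capability bitmap; lines 1231 and 1242 leave little ambiguity about the body (the bitmap-width constant is an assumption):

    static int idxd_verify_supported_opcap(struct idxd_device *idxd,
                                           unsigned long *opmask)
    {
        int bit;

        /* IDXD_MAX_OPCAP_BITS: assumed width of the 256-bit OPCAP bitmap */
        for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {
            if (!test_bit(bit, idxd->opcap_bmap))
                return -EINVAL;
        }
        return 0;
    }
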
1342 #define idxd_wq_attr_invisible(name, cap_field, a, idxd) \ argument
1343 ((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)
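
The macro pairs an attribute name with the wq_cap bit that gates it, so each capability check in idxd_wq_attr_visible() stays a one-liner. Expanded for one of the uses below, idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd) becomes:

    ((attr) == &dev_attr_wq_ats_disable.attr && !(idxd)->hw.wq_cap.wq_ats_support)
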
1346 struct idxd_device *idxd) in idxd_wq_attr_max_batch_size_invisible() argument
1350 idxd->data->type == IDXD_TYPE_IAX; in idxd_wq_attr_max_batch_size_invisible()
1358 struct idxd_device *idxd = wq->idxd; in idxd_wq_attr_visible() local
1360 if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd)) in idxd_wq_attr_visible()
1363 if (idxd_wq_attr_max_batch_size_invisible(attr, idxd)) in idxd_wq_attr_visible()
1366 if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd)) in idxd_wq_attr_visible()
1369 if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd)) in idxd_wq_attr_visible()
1405 struct idxd_device *idxd = confdev_to_idxd(dev); in version_show() local
1407 return sysfs_emit(buf, "%#x\n", idxd->hw.version); in version_show()
1415 struct idxd_device *idxd = confdev_to_idxd(dev); in max_work_queues_size_show() local
1417 return sysfs_emit(buf, "%u\n", idxd->max_wq_size); in max_work_queues_size_show()
1424 struct idxd_device *idxd = confdev_to_idxd(dev); in max_groups_show() local
1426 return sysfs_emit(buf, "%u\n", idxd->max_groups); in max_groups_show()
1433 struct idxd_device *idxd = confdev_to_idxd(dev); in max_work_queues_show() local
1435 return sysfs_emit(buf, "%u\n", idxd->max_wqs); in max_work_queues_show()
1442 struct idxd_device *idxd = confdev_to_idxd(dev); in max_engines_show() local
1444 return sysfs_emit(buf, "%u\n", idxd->max_engines); in max_engines_show()
1451 struct idxd_device *idxd = confdev_to_idxd(dev); in numa_node_show() local
1453 return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); in numa_node_show()
1460 struct idxd_device *idxd = confdev_to_idxd(dev); in max_batch_size_show() local
1462 return sysfs_emit(buf, "%u\n", idxd->max_batch_size); in max_batch_size_show()
1470 struct idxd_device *idxd = confdev_to_idxd(dev); in max_transfer_size_show() local
1472 return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes); in max_transfer_size_show()
1479 struct idxd_device *idxd = confdev_to_idxd(dev); in op_cap_show() local
1481 return op_cap_show_common(dev, buf, idxd->opcap_bmap); in op_cap_show()
1488 struct idxd_device *idxd = confdev_to_idxd(dev); in gen_cap_show() local
1490 return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits); in gen_cap_show()
1497 struct idxd_device *idxd = confdev_to_idxd(dev); in configurable_show() local
1499 return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); in configurable_show()
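
Everything from line 1405 through 1499 is the same two-line template: recover the idxd device from the config device and sysfs_emit() a single field. One instance in full, with the read-only attribute declaration that would normally accompany it (the declaration line is an assumption, not in the listing):

    static ssize_t max_groups_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
    {
        struct idxd_device *idxd = confdev_to_idxd(dev);

        return sysfs_emit(buf, "%u\n", idxd->max_groups);
    }
    static DEVICE_ATTR_RO(max_groups);  /* assumed declaration style */
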
1506 struct idxd_device *idxd = confdev_to_idxd(dev); in clients_show() local
1509 spin_lock(&idxd->dev_lock); in clients_show()
1510 for (i = 0; i < idxd->max_wqs; i++) { in clients_show()
1511 struct idxd_wq *wq = idxd->wqs[i]; in clients_show()
1515 spin_unlock(&idxd->dev_lock); in clients_show()
1524 struct idxd_device *idxd = confdev_to_idxd(dev); in pasid_enabled_show() local
1526 return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd)); in pasid_enabled_show()
1533 struct idxd_device *idxd = confdev_to_idxd(dev); in state_show() local
1535 switch (idxd->state) { in state_show()
1551 struct idxd_device *idxd = confdev_to_idxd(dev); in errors_show() local
1555 spin_lock(&idxd->dev_lock); in errors_show()
1556 multi_u64_to_bmap(swerr_bmap, &idxd->sw_err.bits[0], 4); in errors_show()
1557 spin_unlock(&idxd->dev_lock); in errors_show()
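
clients_show() and errors_show() are the two readers here that take dev_lock: the first so the per-queue client counts sum to a consistent snapshot, the second so the four 64-bit software-error words are copied out atomically before formatting. A sketch of the first (the summed field is an assumption for the elided lines):

    static ssize_t clients_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        struct idxd_device *idxd = confdev_to_idxd(dev);
        int i, count = 0;

        spin_lock(&idxd->dev_lock);
        for (i = 0; i < idxd->max_wqs; i++) {
            struct idxd_wq *wq = idxd->wqs[i];

            count += wq->client_count;  /* assumed field name */
        }
        spin_unlock(&idxd->dev_lock);

        return sysfs_emit(buf, "%d\n", count);
    }
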
1565 struct idxd_device *idxd = confdev_to_idxd(dev); in max_read_buffers_show() local
1567 return sysfs_emit(buf, "%u\n", idxd->max_rdbufs); in max_read_buffers_show()
1583 struct idxd_device *idxd = confdev_to_idxd(dev); in read_buffer_limit_show() local
1585 return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit); in read_buffer_limit_show()
1599 struct idxd_device *idxd = confdev_to_idxd(dev); in read_buffer_limit_store() local
1607 if (idxd->state == IDXD_DEV_ENABLED) in read_buffer_limit_store()
1610 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in read_buffer_limit_store()
1613 if (!idxd->hw.group_cap.rdbuf_limit) in read_buffer_limit_store()
1616 if (val > idxd->hw.group_cap.total_rdbufs) in read_buffer_limit_store()
1619 idxd->rdbuf_limit = val; in read_buffer_limit_store()
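
read_buffer_limit_store() layers its refusals from most to least general: no live reconfiguration, the device must be configurable, the hardware must expose a limit register at all, and the value must fit in the total. The ordering as the fragments give it, with assumed error codes:

    if (idxd->state == IDXD_DEV_ENABLED)
        return -EPERM;       /* no changes on a live device */
    if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
        return -EPERM;
    if (!idxd->hw.group_cap.rdbuf_limit)
        return -EOPNOTSUPP;  /* no limit register on this part */
    if (val > idxd->hw.group_cap.total_rdbufs)
        return -EINVAL;
    idxd->rdbuf_limit = val;
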
1637 struct idxd_device *idxd = confdev_to_idxd(dev); in cdev_major_show() local
1639 return sysfs_emit(buf, "%u\n", idxd->major); in cdev_major_show()
1646 struct idxd_device *idxd = confdev_to_idxd(dev); in cmd_status_show() local
1648 return sysfs_emit(buf, "%#x\n", idxd->cmd_status); in cmd_status_show()
1654 struct idxd_device *idxd = confdev_to_idxd(dev); in cmd_status_store() local
1656 idxd->cmd_status = 0; in cmd_status_store()
1664 struct idxd_device *idxd = confdev_to_idxd(dev); in iaa_cap_show() local
1666 if (idxd->hw.version < DEVICE_VERSION_2) in iaa_cap_show()
1669 return sysfs_emit(buf, "%#llx\n", idxd->hw.iaa_cap.bits); in iaa_cap_show()
1676 struct idxd_device *idxd = confdev_to_idxd(dev); in event_log_size_show() local
1678 if (!idxd->evl) in event_log_size_show()
1681 return sysfs_emit(buf, "%u\n", idxd->evl->size); in event_log_size_show()
1688 struct idxd_device *idxd = confdev_to_idxd(dev); in event_log_size_store() local
1692 if (!idxd->evl) in event_log_size_store()
1699 if (idxd->state == IDXD_DEV_ENABLED) in event_log_size_store()
1702 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) in event_log_size_store()
1706 (val * evl_ent_size(idxd) > ULONG_MAX - idxd->evl->dma)) in event_log_size_store()
1709 idxd->evl->size = val; in event_log_size_store()
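
The guard at line 1706 reads as an overflow-safe bounds check on the event log's end address:

    /* The naive form can wrap before the comparison:
     *   if (idxd->evl->dma + val * evl_ent_size(idxd) > ULONG_MAX)
     * so the check is written in the subtraction-rearranged form seen
     * at line 1706, keeping the left side in range. */
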
1715 struct idxd_device *idxd) in idxd_device_attr_max_batch_size_invisible() argument
1719 idxd->data->type == IDXD_TYPE_IAX; in idxd_device_attr_max_batch_size_invisible()
1723 struct idxd_device *idxd) in idxd_device_attr_read_buffers_invisible() argument
1733 idxd->data->type == IDXD_TYPE_IAX; in idxd_device_attr_read_buffers_invisible()
1737 struct idxd_device *idxd) in idxd_device_attr_iaa_cap_invisible() argument
1740 (idxd->data->type != IDXD_TYPE_IAX || in idxd_device_attr_iaa_cap_invisible()
1741 idxd->hw.version < DEVICE_VERSION_2); in idxd_device_attr_iaa_cap_invisible()
1745 struct idxd_device *idxd) in idxd_device_attr_event_log_size_invisible() argument
1748 !idxd->hw.gen_cap.evl_support); in idxd_device_attr_event_log_size_invisible()
1755 struct idxd_device *idxd = confdev_to_idxd(dev); in idxd_device_attr_visible() local
1757 if (idxd_device_attr_max_batch_size_invisible(attr, idxd)) in idxd_device_attr_visible()
1760 if (idxd_device_attr_read_buffers_invisible(attr, idxd)) in idxd_device_attr_visible()
1763 if (idxd_device_attr_iaa_cap_invisible(attr, idxd)) in idxd_device_attr_visible()
1766 if (idxd_device_attr_event_log_size_invisible(attr, idxd)) in idxd_device_attr_visible()
1811 struct idxd_device *idxd = confdev_to_idxd(dev); in idxd_conf_device_release() local
1813 kfree(idxd->groups); in idxd_conf_device_release()
1814 bitmap_free(idxd->wq_enable_map); in idxd_conf_device_release()
1815 kfree(idxd->wqs); in idxd_conf_device_release()
1816 kfree(idxd->engines); in idxd_conf_device_release()
1817 kfree(idxd->evl); in idxd_conf_device_release()
1818 kmem_cache_destroy(idxd->evl_cache); in idxd_conf_device_release()
1819 ida_free(&idxd_ida, idxd->id); in idxd_conf_device_release()
1820 bitmap_free(idxd->opcap_bmap); in idxd_conf_device_release()
1821 kfree(idxd); in idxd_conf_device_release()
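
idxd_conf_device_release() is the struct device .release callback, so these frees run only when the last reference is dropped, not at remove time; it unwinds the probe-time allocations and returns the device id to the ida. Wiring it up would look roughly like this (the type name and .groups member here are assumptions):

    static const struct device_type idxd_conf_device_type = {
        .name    = "idxd",                   /* assumed name */
        .release = idxd_conf_device_release,
        .groups  = idxd_attribute_groups,    /* assumed attribute groups */
    };
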
1836 static int idxd_register_engine_devices(struct idxd_device *idxd) in idxd_register_engine_devices() argument
1841 for (i = 0; i < idxd->max_engines; i++) { in idxd_register_engine_devices()
1842 engine = idxd->engines[i]; in idxd_register_engine_devices()
1852 for (; i < idxd->max_engines; i++) { in idxd_register_engine_devices()
1853 engine = idxd->engines[i]; in idxd_register_engine_devices()
1858 engine = idxd->engines[j]; in idxd_register_engine_devices()
1864 static int idxd_register_group_devices(struct idxd_device *idxd) in idxd_register_group_devices() argument
1869 for (i = 0; i < idxd->max_groups; i++) { in idxd_register_group_devices()
1870 group = idxd->groups[i]; in idxd_register_group_devices()
1880 for (; i < idxd->max_groups; i++) { in idxd_register_group_devices()
1881 group = idxd->groups[i]; in idxd_register_group_devices()
1886 group = idxd->groups[j]; in idxd_register_group_devices()
1892 static int idxd_register_wq_devices(struct idxd_device *idxd) in idxd_register_wq_devices() argument
1897 for (i = 0; i < idxd->max_wqs; i++) { in idxd_register_wq_devices()
1898 wq = idxd->wqs[i]; in idxd_register_wq_devices()
1908 for (; i < idxd->max_wqs; i++) { in idxd_register_wq_devices()
1909 wq = idxd->wqs[i]; in idxd_register_wq_devices()
1914 wq = idxd->wqs[j]; in idxd_register_wq_devices()
1920 int idxd_register_devices(struct idxd_device *idxd) in idxd_register_devices() argument
1922 struct device *dev = &idxd->pdev->dev; in idxd_register_devices()
1925 rc = device_add(idxd_confdev(idxd)); in idxd_register_devices()
1929 rc = idxd_register_wq_devices(idxd); in idxd_register_devices()
1935 rc = idxd_register_engine_devices(idxd); in idxd_register_devices()
1941 rc = idxd_register_group_devices(idxd); in idxd_register_devices()
1950 for (i = 0; i < idxd->max_engines; i++) in idxd_register_devices()
1951 device_unregister(engine_confdev(idxd->engines[i])); in idxd_register_devices()
1953 for (i = 0; i < idxd->max_wqs; i++) in idxd_register_devices()
1954 device_unregister(wq_confdev(idxd->wqs[i])); in idxd_register_devices()
1956 device_del(idxd_confdev(idxd)); in idxd_register_devices()
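
Registration is tiered (device, then queues, engines, groups) with goto-style unwind, and each per-tier helper must distinguish the two states its loop leaves behind on failure: devices already device_add()ed need device_unregister(), while the rest only have their initial reference dropped with put_device(). A simplified variant of the wq tier consistent with lines 1892-1918:

    static int idxd_register_wq_devices(struct idxd_device *idxd)
    {
        int i, j, rc;

        for (i = 0; i < idxd->max_wqs; i++) {
            rc = device_add(wq_confdev(idxd->wqs[i]));
            if (rc < 0)
                goto cleanup;
        }
        return 0;

    cleanup:
        for (j = 0; j < i; j++)          /* fully added: unregister */
            device_unregister(wq_confdev(idxd->wqs[j]));
        for (; i < idxd->max_wqs; i++)   /* never added: drop the ref */
            put_device(wq_confdev(idxd->wqs[i]));
        return rc;
    }
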
1960 void idxd_unregister_devices(struct idxd_device *idxd) in idxd_unregister_devices() argument
1964 for (i = 0; i < idxd->max_wqs; i++) { in idxd_unregister_devices()
1965 struct idxd_wq *wq = idxd->wqs[i]; in idxd_unregister_devices()
1970 for (i = 0; i < idxd->max_engines; i++) { in idxd_unregister_devices()
1971 struct idxd_engine *engine = idxd->engines[i]; in idxd_unregister_devices()
1976 for (i = 0; i < idxd->max_groups; i++) { in idxd_unregister_devices()
1977 struct idxd_group *group = idxd->groups[i]; in idxd_unregister_devices()