Lines matching +full:entry +full:-address (search excerpt from the Intel PMT class driver)

// SPDX-License-Identifier: GPL-2.0

#include <linux/io-64-nonatomic-lo-hi.h>
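On platforms without native 64-bit MMIO, <linux/io-64-nonatomic-lo-hi.h> implements readq()/writeq() as two 32-bit accesses (low word first), which is why the driver can use readq() unconditionally in its MMIO copy path below.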
/* in intel_pmt_is_early_client_hw() */
	return !!(ivdev->quirks & VSEC_QUIRK_EARLY_HW);

/* in pmt_memcpy64_fromio() */
	return -EFAULT;
/* in pmt_telem_read_mmio() */
	if (cb && cb->read_telem)
		return cb->read_telem(pdev, guid, buf, count);

	/* PUNIT on SPR only supports aligned 64-bit read */
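Only the -EFAULT return from pmt_memcpy64_fromio() survives in the matched lines; the comment above suggests reads for the SPR PUNIT are routed through an aligned 64-bit copy helper. A minimal sketch of such a helper, assuming the signature implied by the fragments (a hedged reconstruction, not the verbatim source):

static int pmt_memcpy64_fromio(void *to, const void __iomem *from, size_t count)
{
	int i, remain;
	u64 *buf = to;

	/* The SPR PUNIT rejects sub-64-bit reads, so the source must be 8-byte aligned */
	if (!IS_ALIGNED((unsigned long)from, 8))
		return -EFAULT;

	/* Copy whole quadwords with 64-bit MMIO reads */
	for (i = 0; i < count / 8; i++)
		buf[i] = readq(&((const u64 __iomem *)from)[i]);

	/* Bounce any partial tail quadword through a temporary */
	remain = count % 8;
	if (remain) {
		u64 tmp = readq(&((const u64 __iomem *)from)[i]);

		memcpy(&buf[i], &tmp, remain);
	}

	return count;
}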
/* in intel_pmt_read() */
	struct intel_pmt_entry *entry = container_of(attr,
						     struct intel_pmt_entry,
						     pmt_bin_attr);

	if (off < 0)
		return -EINVAL;
	if (off >= entry->size)
		return 0;
	if (count > entry->size - off)
		count = entry->size - off;

	count = pmt_telem_read_mmio(entry->ep->pcidev, entry->cb, entry->header.guid, buf,
				    entry->base + off, count);
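Userspace consumes this read() handler through the class device's binary attribute. A hedged example, assuming the conventional /sys/class/intel_pmt/telem0/telem node (the actual index depends on enumeration):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumes device index 0; check /sys/class/intel_pmt/ for real nodes */
	int fd = open("/sys/class/intel_pmt/telem0/telem", O_RDONLY);
	uint64_t sample;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Reads land in intel_pmt_read(), which clamps count to entry->size */
	if (pread(fd, &sample, sizeof(sample), 0) == (ssize_t)sizeof(sample))
		printf("first qword: 0x%016llx\n", (unsigned long long)sample);

	close(fd);
	return 0;
}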
/* in intel_pmt_mmap() */
	struct intel_pmt_entry *entry = container_of(attr,
						     struct intel_pmt_entry,
						     pmt_bin_attr);
	unsigned long vsize = vma->vm_end - vma->vm_start;
	unsigned long phys = entry->base_addr;
	unsigned long pfn = PFN_DOWN(phys);
	unsigned long psize;

	/* The telemetry region is read-only, so refuse writable mappings */
	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		return -EROFS;

	/* Clamp the mapping to the pages covering the entry */
	psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;
	if (vsize > psize)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (io_remap_pfn_range(vma, vma->vm_start, pfn,
			       vsize, vma->vm_page_prot))
		return -EAGAIN;
/* in guid_show() */
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);
	return sprintf(buf, "0x%x\n", entry->guid);

/* in size_show() */
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);
	return sprintf(buf, "%zu\n", entry->size);

/* in offset_show() */
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);
	return sprintf(buf, "%lu\n", offset_in_page(entry->base_addr));
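Because mmap() works in whole pages while entry->base_addr need not be page aligned, the offset attribute tells userspace where the data begins inside the first mapped page. A hedged sketch tying the attributes together (the path and fixed size are illustrative; a real consumer would read the size and offset attributes first):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device index; guid/size/offset live next to the telem node */
	int fd = open("/sys/class/intel_pmt/telem0/telem", O_RDONLY);
	size_t size = 4096;	/* in practice, read the "size" attribute */
	long offset = 0;	/* in practice, read the "offset" attribute */
	void *map;

	if (fd < 0)
		return 1;

	/* PROT_WRITE would be rejected with EROFS by intel_pmt_mmap() */
	map = mmap(NULL, size + offset, PROT_READ, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	/* Telemetry starts "offset" bytes into the first mapped page */
	printf("first byte: 0x%02x\n", ((unsigned char *)map)[offset]);

	munmap(map, size + offset);
	close(fd);
	return 0;
}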
static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
				    struct intel_vsec_device *ivdev,
				    struct resource *disc_res)
{
	struct pci_dev *pci_dev = ivdev->pcidev;
	struct device *dev = &ivdev->auxdev.dev;
	struct intel_pmt_header *header = &entry->header;
	u8 bir;

	/*
	 * For non-local access types the lower 3 bits of base offset
	 * contain the index of the base address register where the
	 * telemetry can be found.
	 */
	bir = GET_BIR(header->base_offset);

	switch (header->access_type) {
	case ACCESS_LOCAL:
		if (bir) {
			dev_err(dev, "Unsupported BAR index %d for access type %d\n",
				bir, header->access_type);
			return -EINVAL;
		}
		/*
		 * For access_type LOCAL, the base address is as follows:
		 * base address = end of discovery region + base offset
		 */
		entry->base_addr = disc_res->end + 1 + header->base_offset;
		/*
		 * Some hardware uses a different calculation: ACCESS_LOCAL
		 * refers to an address in the same BAR as the header but at
		 * a fixed offset. The header address was supplied to the
		 * driver, so we don't know which BAR it was in; search for
		 * the BAR whose range includes the header address.
		 */
		if (intel_pmt_is_early_client_hw(dev)) {
			int i;

			entry->base_addr = 0;
			for (i = 0; i < 6; i++)
				if (disc_res->start >= pci_resource_start(pci_dev, i) &&
				    disc_res->start <= pci_resource_end(pci_dev, i)) {
					entry->base_addr = pci_resource_start(pci_dev, i) +
							   header->base_offset;
					break;
				}
			if (!entry->base_addr)
				return -EINVAL;
		}
		break;
	case ACCESS_BARID:
		/* Use the provided base address if it exists */
		if (ivdev->base_addr) {
			entry->base_addr = ivdev->base_addr +
					   GET_ADDRESS(header->base_offset);
			break;
		}
		/*
		 * Otherwise the base offset is relative to the BAR indexed
		 * by bir, so retrieve that BAR's address from the parent
		 * PCI device and add the offset.
		 */
		entry->base_addr = pci_resource_start(pci_dev, bir) +
				   GET_ADDRESS(header->base_offset);
		break;
	default:
		dev_err(dev, "Unsupported access type %d\n", header->access_type);
		return -EINVAL;
	}

	entry->guid = header->guid;
	entry->size = header->size;
	entry->cb = ivdev->priv_data;

	return 0;
}
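GET_BIR() and GET_ADDRESS() are defined outside the matched lines; based on the comment about the lower 3 bits of base_offset, they presumably split the field like this (a hedged reconstruction, not the verbatim header):

#include <linux/bits.h>

/* Presumed layout of header->base_offset: bits [2:0] carry the BAR
 * index (BIR), the remaining bits the address/offset within that BAR. */
#define GET_BIR(v)	((v) & GENMASK(2, 0))
#define GET_ADDRESS(v)	((v) & GENMASK(31, 3))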
static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
				  struct intel_pmt_namespace *ns,
				  struct device *parent)
{
	...
	ret = xa_alloc(ns->xa, &entry->devid, entry, PMT_XA_LIMIT, GFP_KERNEL);
	if (ret)
		return ret;

	dev = device_create(&intel_pmt_class, parent, MKDEV(0, 0), entry,
			    "%s%d", ns->name, entry->devid);
	if (IS_ERR(dev)) {
		dev_err(parent, "Could not create %s%d device node\n",
			ns->name, entry->devid);
		...
	}

	entry->kobj = &dev->kobj;

	if (ns->attr_grp) {
		ret = sysfs_create_group(entry->kobj, ns->attr_grp);
		...
	}

	/* A zero-size entry has no data buffer, so no binary file is needed */
	if (!entry->size)
		return 0;

	res.start = entry->base_addr;
	res.end = res.start + entry->size - 1;

	entry->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(entry->base)) {
		ret = PTR_ERR(entry->base);
		...
	}

	sysfs_bin_attr_init(&entry->pmt_bin_attr);
	entry->pmt_bin_attr.attr.name = ns->name;
	entry->pmt_bin_attr.attr.mode = 0440;
	entry->pmt_bin_attr.mmap = intel_pmt_mmap;
	entry->pmt_bin_attr.read = intel_pmt_read;
	entry->pmt_bin_attr.size = entry->size;

	ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
	...
	if (ns->pmt_add_endpoint) {
		ret = ns->pmt_add_endpoint(ivdev, entry);
		...
	}

	/* Error unwind, in reverse order of creation: */
	sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr);
	if (ns->attr_grp)
		sysfs_remove_group(entry->kobj, ns->attr_grp);
	...
	xa_erase(ns->xa, entry->devid);
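Taken together, registration produces one device node per entry under the intel_pmt class. Assuming the common "telem" namespace name, the resulting sysfs layout would look roughly like this (hedged; the attribute set varies by namespace):

/sys/class/intel_pmt/telem0/
	guid	- telemetry region GUID (guid_show() above)
	size	- region size in bytes (size_show())
	offset	- data offset within the first mapped page (offset_show())
	telem	- read-only binary node backed by intel_pmt_read()/intel_pmt_mmap()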
int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns,
			 struct intel_vsec_device *intel_vsec_dev, int idx)
{
	struct device *dev = &intel_vsec_dev->auxdev.dev;
	struct resource *disc_res = &intel_vsec_dev->resource[idx];
	int ret;

	entry->disc_table = devm_ioremap_resource(dev, disc_res);
	if (IS_ERR(entry->disc_table))
		return PTR_ERR(entry->disc_table);

	/* Decode the discovery header, then compute and map the base address */
	ret = ns->pmt_header_decode(entry, dev);
	if (ret)
		return ret;

	ret = intel_pmt_populate_entry(entry, intel_vsec_dev, disc_res);
	if (ret)
		return ret;

	return intel_pmt_dev_register(entry, ns, dev);
}
void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
			   struct intel_pmt_namespace *ns)
{
	struct device *dev = kobj_to_dev(entry->kobj);

	if (entry->size)
		sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr);

	if (ns->attr_grp)
		sysfs_remove_group(entry->kobj, ns->attr_grp);

	device_unregister(dev);
	xa_erase(ns->xa, entry->devid);
}
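intel_pmt_dev_create() and intel_pmt_dev_destroy() bracket an entry's lifetime: the PMT feature drivers (e.g. telemetry and crashlog) call create once per discovery table at probe and destroy at remove, with destroy tearing things down in the reverse of the order intel_pmt_dev_register() built them.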