Lines matching references to vma
802 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); in nvkm_vma_new() local
803 if (vma) { in nvkm_vma_new()
804 vma->addr = addr; in nvkm_vma_new()
805 vma->size = size; in nvkm_vma_new()
806 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
807 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
809 return vma; in nvkm_vma_new()
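nvkm_vma_new() above allocates a record describing one virtual-address range and leaves its page-size fields marked as undecided. A minimal standalone sketch of that pattern, with hypothetical names (struct vma_rec, PAGE_NONE) standing in for the real nvkm_vma structure, which also carries list and rbtree linkage and mapping state:

#include <stdint.h>
#include <stdlib.h>

#define PAGE_NONE 0xff   /* hypothetical stand-in for NVKM_VMA_PAGE_NONE */

struct vma_rec {
	uint64_t addr;   /* start of the virtual-address range */
	uint64_t size;   /* length of the range in bytes */
	uint8_t  page;   /* chosen page-size index, PAGE_NONE until decided */
	uint8_t  refd;   /* page-size index the PTEs were referenced at */
};

static struct vma_rec *vma_rec_new(uint64_t addr, uint64_t size)
{
	struct vma_rec *vma = calloc(1, sizeof(*vma));

	if (vma) {
		vma->addr = addr;
		vma->size = size;
		vma->page = PAGE_NONE;
		vma->refd = PAGE_NONE;
	}
	return vma;      /* NULL when the allocation fails */
}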
813 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) in nvkm_vma_tail() argument
817 BUG_ON(vma->size == tail); in nvkm_vma_tail()
819 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail))) in nvkm_vma_tail()
821 vma->size -= tail; in nvkm_vma_tail()
823 new->mapref = vma->mapref; in nvkm_vma_tail()
824 new->sparse = vma->sparse; in nvkm_vma_tail()
825 new->page = vma->page; in nvkm_vma_tail()
826 new->refd = vma->refd; in nvkm_vma_tail()
827 new->used = vma->used; in nvkm_vma_tail()
828 new->part = vma->part; in nvkm_vma_tail()
829 new->busy = vma->busy; in nvkm_vma_tail()
830 new->mapped = vma->mapped; in nvkm_vma_tail()
831 list_add(&new->head, &vma->head); in nvkm_vma_tail()
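nvkm_vma_tail() splits the last `tail` bytes of an existing region into a new node that inherits the parent's state and is linked directly after it in the address-ordered list. A sketch under simplified assumptions: a hypothetical vma_node with a plain doubly linked list instead of list_head, copying only two of the flags the real code propagates:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct vma_node {
	uint64_t addr, size;
	bool used;                         /* region currently handed out */
	bool part;                         /* tail split off a larger used region */
	struct vma_node *prev, *next;      /* address-ordered neighbour list */
};

static struct vma_node *vma_node_tail(struct vma_node *vma, uint64_t tail)
{
	struct vma_node *new = calloc(1, sizeof(*new));

	assert(tail && tail < vma->size);           /* the kernel BUG()s when tail == size */

	if (!new)
		return NULL;

	new->addr = vma->addr + (vma->size - tail); /* tail begins where the head now ends */
	new->size = tail;
	vma->size -= tail;                          /* shrink the original in place */

	new->used = vma->used;                      /* inherit the parent's state */
	new->part = vma->part;

	new->prev = vma;                            /* link immediately after the parent */
	new->next = vma->next;
	if (vma->next)
		vma->next->prev = new;
	vma->next = new;
	return new;
}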
836 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_remove() argument
838 rb_erase(&vma->tree, &vmm->free); in nvkm_vmm_free_remove()
842 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_delete() argument
844 nvkm_vmm_free_remove(vmm, vma); in nvkm_vmm_free_delete()
845 list_del(&vma->head); in nvkm_vmm_free_delete()
846 kfree(vma); in nvkm_vmm_free_delete()
850 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_insert() argument
858 if (vma->size < this->size) in nvkm_vmm_free_insert()
861 if (vma->size > this->size) in nvkm_vmm_free_insert()
864 if (vma->addr < this->addr) in nvkm_vmm_free_insert()
867 if (vma->addr > this->addr) in nvkm_vmm_free_insert()
873 rb_link_node(&vma->tree, parent, ptr); in nvkm_vmm_free_insert()
874 rb_insert_color(&vma->tree, &vmm->free); in nvkm_vmm_free_insert()
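nvkm_vmm_free_insert() files free regions into vmm->free ordered primarily by size and secondarily by address, which is what lets the allocator pick the smallest free region that still satisfies a request. A sketch of just that descent order on an unbalanced binary tree (hypothetical free_node; the real code links into a kernel rbtree with rb_link_node()/rb_insert_color()):

#include <stddef.h>
#include <stdint.h>

struct free_node {
	uint64_t addr, size;
	struct free_node *left, *right;
};

/* Smaller sizes go left, ties are broken by address, so an in-order walk
 * yields free regions from smallest to largest.  Duplicates cannot occur
 * because two free regions never share a start address. */
static void free_tree_insert(struct free_node **root, struct free_node *vma)
{
	struct free_node **ptr = root;

	while (*ptr) {
		struct free_node *this = *ptr;

		if (vma->size < this->size)
			ptr = &this->left;
		else if (vma->size > this->size)
			ptr = &this->right;
		else if (vma->addr < this->addr)
			ptr = &this->left;
		else
			ptr = &this->right;
	}
	vma->left = vma->right = NULL;
	*ptr = vma;
}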
878 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_remove() argument
880 rb_erase(&vma->tree, &vmm->root); in nvkm_vmm_node_remove()
884 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_delete() argument
886 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_node_delete()
887 list_del(&vma->head); in nvkm_vmm_node_delete()
888 kfree(vma); in nvkm_vmm_node_delete()
892 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_insert() argument
900 if (vma->addr < this->addr) in nvkm_vmm_node_insert()
903 if (vma->addr > this->addr) in nvkm_vmm_node_insert()
909 rb_link_node(&vma->tree, parent, ptr); in nvkm_vmm_node_insert()
910 rb_insert_color(&vma->tree, &vmm->root); in nvkm_vmm_node_insert()
918 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree); in nvkm_vmm_node_search() local
919 if (addr < vma->addr) in nvkm_vmm_node_search()
922 if (addr >= vma->addr + vma->size) in nvkm_vmm_node_search()
925 return vma; in nvkm_vmm_node_search()
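nvkm_vmm_node_search() walks the address-keyed vmm->root tree for the allocated region containing a given address: go left when the address is below a node's start, right when it is at or past its end, and stop when it falls inside. The same walk as a sketch on a plain binary tree with hypothetical names:

#include <stddef.h>
#include <stdint.h>

struct addr_node {
	uint64_t addr, size;
	struct addr_node *left, *right;
};

static struct addr_node *addr_tree_search(struct addr_node *root, uint64_t addr)
{
	struct addr_node *this = root;

	while (this) {
		if (addr < this->addr)
			this = this->left;                /* target lies before this region */
		else if (addr >= this->addr + this->size)
			this = this->right;               /* target lies past its end */
		else
			return this;                      /* addr falls inside the region */
	}
	return NULL;                                      /* no allocated region covers addr */
}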
935 struct nvkm_vma *vma, struct nvkm_vma *next, u64 size) in nvkm_vmm_node_merge() argument
938 if (vma->size == size) { in nvkm_vmm_node_merge()
939 vma->size += next->size; in nvkm_vmm_node_merge()
942 prev->size += vma->size; in nvkm_vmm_node_merge()
943 nvkm_vmm_node_delete(vmm, vma); in nvkm_vmm_node_merge()
946 return vma; in nvkm_vmm_node_merge()
951 vma->size -= size; in nvkm_vmm_node_merge()
959 if (vma->size != size) { in nvkm_vmm_node_merge()
960 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_node_merge()
962 vma->addr += size; in nvkm_vmm_node_merge()
963 vma->size -= size; in nvkm_vmm_node_merge()
964 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_node_merge()
966 prev->size += vma->size; in nvkm_vmm_node_merge()
967 nvkm_vmm_node_delete(vmm, vma); in nvkm_vmm_node_merge()
972 return vma; in nvkm_vmm_node_merge()
977 struct nvkm_vma *vma, u64 addr, u64 size) in nvkm_vmm_node_split() argument
981 if (vma->addr != addr) { in nvkm_vmm_node_split()
982 prev = vma; in nvkm_vmm_node_split()
983 if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) in nvkm_vmm_node_split()
985 vma->part = true; in nvkm_vmm_node_split()
986 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_node_split()
989 if (vma->size != size) { in nvkm_vmm_node_split()
991 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) { in nvkm_vmm_node_split()
992 nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size); in nvkm_vmm_node_split()
999 return vma; in nvkm_vmm_node_split()
1003 nvkm_vma_dump(struct nvkm_vma *vma) in nvkm_vma_dump() argument
1006 vma->addr, (u64)vma->size, in nvkm_vma_dump()
1007 vma->used ? '-' : 'F', in nvkm_vma_dump()
1008 vma->mapref ? 'R' : '-', in nvkm_vma_dump()
1009 vma->sparse ? 'S' : '-', in nvkm_vma_dump()
1010 vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-', in nvkm_vma_dump()
1011 vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-', in nvkm_vma_dump()
1012 vma->part ? 'P' : '-', in nvkm_vma_dump()
1013 vma->busy ? 'B' : '-', in nvkm_vma_dump()
1014 vma->mapped ? 'M' : '-', in nvkm_vma_dump()
1015 vma->memory); in nvkm_vma_dump()
1021 struct nvkm_vma *vma; in nvkm_vmm_dump() local
1022 list_for_each_entry(vma, &vmm->list, head) { in nvkm_vmm_dump()
1023 nvkm_vma_dump(vma); in nvkm_vmm_dump()
1030 struct nvkm_vma *vma; in nvkm_vmm_dtor() local
1044 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree); in nvkm_vmm_dtor() local
1045 nvkm_vmm_put(vmm, &vma); in nvkm_vmm_dtor()
1059 vma = list_first_entry(&vmm->list, typeof(*vma), head); in nvkm_vmm_dtor()
1060 list_del(&vma->head); in nvkm_vmm_dtor()
1061 kfree(vma); in nvkm_vmm_dtor()
1078 struct nvkm_vma *vma; in nvkm_vmm_ctor_managed() local
1079 if (!(vma = nvkm_vma_new(addr, size))) in nvkm_vmm_ctor_managed()
1081 vma->mapref = true; in nvkm_vmm_ctor_managed()
1082 vma->sparse = false; in nvkm_vmm_ctor_managed()
1083 vma->used = true; in nvkm_vmm_ctor_managed()
1084 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_ctor_managed()
1085 list_add_tail(&vma->head, &vmm->list); in nvkm_vmm_ctor_managed()
1098 struct nvkm_vma *vma; in nvkm_vmm_ctor() local
1170 if (!(vma = nvkm_vma_new(addr, size))) in nvkm_vmm_ctor()
1172 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_ctor()
1173 list_add_tail(&vma->head, &vmm->list); in nvkm_vmm_ctor()
1193 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start))) in nvkm_vmm_ctor()
1196 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_ctor()
1197 list_add(&vma->head, &vmm->list); in nvkm_vmm_ctor()
1215 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_pfn_split_merge() argument
1221 if (vma->addr == addr && vma->part && (prev = node(vma, prev))) { in nvkm_vmm_pfn_split_merge()
1226 if (vma->addr + vma->size == addr + size && (next = node(vma, next))) { in nvkm_vmm_pfn_split_merge()
1233 return nvkm_vmm_node_merge(vmm, prev, vma, next, size); in nvkm_vmm_pfn_split_merge()
1234 return nvkm_vmm_node_split(vmm, vma, addr, size); in nvkm_vmm_pfn_split_merge()
1240 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr); in nvkm_vmm_pfn_unmap() local
1245 if (!vma) in nvkm_vmm_pfn_unmap()
1249 if (!vma->mapped || vma->memory) in nvkm_vmm_pfn_unmap()
1252 size = min(limit - start, vma->size - (start - vma->addr)); in nvkm_vmm_pfn_unmap()
1254 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd], in nvkm_vmm_pfn_unmap()
1257 next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false); in nvkm_vmm_pfn_unmap()
1259 vma = next; in nvkm_vmm_pfn_unmap()
1260 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vmm_pfn_unmap()
1261 vma->mapped = false; in nvkm_vmm_pfn_unmap()
1263 } while ((vma = node(vma, next)) && (start = vma->addr) < limit); in nvkm_vmm_pfn_unmap()
1277 struct nvkm_vma *vma, *tmp; in nvkm_vmm_pfn_map() local
1298 if (!(vma = nvkm_vmm_node_search(vmm, addr))) in nvkm_vmm_pfn_map()
1303 bool mapped = vma->mapped; in nvkm_vmm_pfn_map()
1316 size = min_t(u64, size, vma->size + vma->addr - addr); in nvkm_vmm_pfn_map()
1321 if (!vma->mapref || vma->memory) { in nvkm_vmm_pfn_map()
1338 tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size, in nvkm_vmm_pfn_map()
1350 vma = tmp; in nvkm_vmm_pfn_map()
1376 if (vma->addr + vma->size == addr + size) in nvkm_vmm_pfn_map()
1377 vma = node(vma, next); in nvkm_vmm_pfn_map()
1391 } while (vma && start < limit); in nvkm_vmm_pfn_map()
1397 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap_region() argument
1402 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags); in nvkm_vmm_unmap_region()
1403 nvkm_memory_unref(&vma->memory); in nvkm_vmm_unmap_region()
1404 vma->mapped = false; in nvkm_vmm_unmap_region()
1406 if (vma->part && (prev = node(vma, prev)) && prev->mapped) in nvkm_vmm_unmap_region()
1408 if ((next = node(vma, next)) && (!next->part || next->mapped)) in nvkm_vmm_unmap_region()
1410 nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size); in nvkm_vmm_unmap_region()
1414 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn) in nvkm_vmm_unmap_locked() argument
1416 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd]; in nvkm_vmm_unmap_locked()
1418 if (vma->mapref) { in nvkm_vmm_unmap_locked()
1419 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn); in nvkm_vmm_unmap_locked()
1420 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vmm_unmap_locked()
1422 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn); in nvkm_vmm_unmap_locked()
1425 nvkm_vmm_unmap_region(vmm, vma); in nvkm_vmm_unmap_locked()
1429 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap() argument
1431 if (vma->memory) { in nvkm_vmm_unmap()
1433 nvkm_vmm_unmap_locked(vmm, vma, false); in nvkm_vmm_unmap()
1439 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_valid() argument
1461 if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) || in nvkm_vmm_map_valid()
1462 !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) || in nvkm_vmm_map_valid()
1466 vma->addr, (u64)vma->size, map->offset, map->page->shift, in nvkm_vmm_map_valid()
1475 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_choose() argument
1480 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map)) in nvkm_vmm_map_choose()
1487 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_locked() argument
1493 map->no_comp = vma->no_comp; in nvkm_vmm_map_locked()
1496 if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) { in nvkm_vmm_map_locked()
1499 map->offset, (u64)vma->size); in nvkm_vmm_map_locked()
1504 if (vma->page == NVKM_VMA_PAGE_NONE && in nvkm_vmm_map_locked()
1505 vma->refd == NVKM_VMA_PAGE_NONE) { in nvkm_vmm_map_locked()
1509 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1513 nvkm_vmm_map_choose(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1518 if (vma->refd != NVKM_VMA_PAGE_NONE) in nvkm_vmm_map_locked()
1519 map->page = &vmm->func->page[vma->refd]; in nvkm_vmm_map_locked()
1521 map->page = &vmm->func->page[vma->page]; in nvkm_vmm_map_locked()
1523 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1556 if (vma->refd == NVKM_VMA_PAGE_NONE) { in nvkm_vmm_map_locked()
1557 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1561 vma->refd = map->page - vmm->func->page; in nvkm_vmm_map_locked()
1563 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1566 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags); in nvkm_vmm_map_locked()
1567 nvkm_memory_unref(&vma->memory); in nvkm_vmm_map_locked()
1568 vma->memory = nvkm_memory_ref(map->memory); in nvkm_vmm_map_locked()
1569 vma->mapped = true; in nvkm_vmm_map_locked()
1570 vma->tags = map->tags; in nvkm_vmm_map_locked()
1575 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc, in nvkm_vmm_map() argument
1580 if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) && in nvkm_vmm_map()
1582 return nvkm_vmm_map_locked(vmm, vma, argv, argc, map); in nvkm_vmm_map()
1585 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map); in nvkm_vmm_map()
1586 vma->busy = false; in nvkm_vmm_map()
1592 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_put_region() argument
1596 if ((prev = node(vma, prev)) && !prev->used) { in nvkm_vmm_put_region()
1597 vma->addr = prev->addr; in nvkm_vmm_put_region()
1598 vma->size += prev->size; in nvkm_vmm_put_region()
1602 if ((next = node(vma, next)) && !next->used) { in nvkm_vmm_put_region()
1603 vma->size += next->size; in nvkm_vmm_put_region()
1607 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_put_region()
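nvkm_vmm_put_region() hands a region back to the free pool, first absorbing any unused neighbour on either side so adjacent free space always ends up as a single node. A sketch of the coalescing step, reusing the simplified vma_node from the earlier split sketch (vma_node_release() is a hypothetical stand-in for nvkm_vmm_free_delete()):

/* Unlink a node from the address-ordered list and release it; in the kernel
 * this also erases the node from the size-ordered free rbtree. */
static void vma_node_release(struct vma_node *n)
{
	if (n->prev)
		n->prev->next = n->next;
	if (n->next)
		n->next->prev = n->prev;
	free(n);
}

static void put_region(struct vma_node *vma)
{
	struct vma_node *prev = vma->prev;
	struct vma_node *next = vma->next;

	if (prev && !prev->used) {        /* absorb a free neighbour on the left */
		vma->addr = prev->addr;
		vma->size += prev->size;
		vma_node_release(prev);
	}
	if (next && !next->used) {        /* absorb a free neighbour on the right */
		vma->size += next->size;
		vma_node_release(next);
	}
	/* The real code finishes by re-inserting the (possibly grown) region
	 * into the free tree via nvkm_vmm_free_insert(). */
}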
1611 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_put_locked() argument
1614 struct nvkm_vma *next = vma; in nvkm_vmm_put_locked()
1616 BUG_ON(vma->part); in nvkm_vmm_put_locked()
1618 if (vma->mapref || !vma->sparse) { in nvkm_vmm_put_locked()
1639 size, vma->sparse, in nvkm_vmm_put_locked()
1653 next = vma; in nvkm_vmm_put_locked()
1657 } while ((next = node(vma, next)) && next->part); in nvkm_vmm_put_locked()
1659 if (vma->sparse && !vma->mapref) { in nvkm_vmm_put_locked()
1668 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size); in nvkm_vmm_put_locked()
1670 if (vma->sparse) { in nvkm_vmm_put_locked()
1679 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false); in nvkm_vmm_put_locked()
1683 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_put_locked()
1686 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vmm_put_locked()
1687 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vmm_put_locked()
1688 vma->used = false; in nvkm_vmm_put_locked()
1689 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_put_locked()
1695 struct nvkm_vma *vma = *pvma; in nvkm_vmm_put() local
1696 if (vma) { in nvkm_vmm_put()
1698 nvkm_vmm_put_locked(vmm, vma); in nvkm_vmm_put()
1710 struct nvkm_vma *vma = NULL, *tmp; in nvkm_vmm_get_locked() local
1790 vma = this; in nvkm_vmm_get_locked()
1795 if (unlikely(!vma)) in nvkm_vmm_get_locked()
1801 if (addr != vma->addr) { in nvkm_vmm_get_locked()
1802 if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) { in nvkm_vmm_get_locked()
1803 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1806 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_get_locked()
1807 vma = tmp; in nvkm_vmm_get_locked()
1810 if (size != vma->size) { in nvkm_vmm_get_locked()
1811 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) { in nvkm_vmm_get_locked()
1812 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1820 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1822 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true); in nvkm_vmm_get_locked()
1824 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1828 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1832 vma->mapref = mapref && !getref; in nvkm_vmm_get_locked()
1833 vma->sparse = sparse; in nvkm_vmm_get_locked()
1834 vma->page = page - vmm->func->page; in nvkm_vmm_get_locked()
1835 vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE; in nvkm_vmm_get_locked()
1836 vma->used = true; in nvkm_vmm_get_locked()
1837 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_get_locked()
1838 *pvma = vma; in nvkm_vmm_get_locked()
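nvkm_vmm_get_locked() satisfies an allocation by carving the requested window out of a chosen free region: if the region starts before the wanted address, the tail from that point onward becomes the candidate and the head is re-filed as free; if the candidate is still too large, a second split leaves the surplus as a free tail. A sketch of that carve-out order, reusing vma_node_tail() from the earlier sketch, with error handling reduced to returning NULL (the real code re-files leftovers with nvkm_vmm_free_insert(), backs out via nvkm_vmm_put_region(), and references page tables before marking the region used):

static struct vma_node *carve_out(struct vma_node *vma, uint64_t addr, uint64_t size)
{
	struct vma_node *tmp;

	if (addr != vma->addr) {
		/* First split: the original keeps the free head, `tmp` starts at addr. */
		tmp = vma_node_tail(vma, vma->addr + vma->size - addr);
		if (!tmp)
			return NULL;
		vma = tmp;
	}

	if (size != vma->size) {
		/* Second split: everything past `size` bytes stays behind as a free tail. */
		if (!vma_node_tail(vma, vma->size - size))
			return NULL;
	}

	vma->used = true;     /* hand out exactly [addr, addr + size) */
	return vma;
}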