Lines matching full:pd in drivers/gpu/drm/gma500/mmu.c (GMA500 DRM driver, Linux kernel). Each match carries its in-file line number up front and the enclosing function at the end; the trailing "argument" and "local" tags mark the lines where pd is declared.

121 void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)  in psb_mmu_set_pd_context()  argument
123 struct drm_device *dev = pd->driver->dev; in psb_mmu_set_pd_context()
128 down_write(&pd->driver->sem); in psb_mmu_set_pd_context()
129 PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset); in psb_mmu_set_pd_context()
131 psb_mmu_flush_pd_locked(pd->driver, 1); in psb_mmu_set_pd_context()
132 pd->hw_context = hw_context; in psb_mmu_set_pd_context()
133 up_write(&pd->driver->sem); in psb_mmu_set_pd_context()
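
The matches above are the context-bind step: the page directory's page-frame address is written to a BIF directory-list base register under the driver's write semaphore, the PD is flushed, and the hardware context is recorded. A minimal usage sketch; the initialised struct psb_mmu_driver *driver is an assumption, not part of the matches:

	struct psb_mmu_pd *pd = psb_mmu_get_default_pd(driver);

	/* Bind the default page directory to hardware context 0; this is
	 * what writes PSB_CR_BIF_DIR_LIST_BASE0 under driver->sem. */
	psb_mmu_set_pd_context(pd, 0);
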
161 struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); in psb_mmu_alloc_pd() local
165 if (!pd) in psb_mmu_alloc_pd()
168 pd->p = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
169 if (!pd->p) in psb_mmu_alloc_pd()
171 pd->dummy_pt = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
172 if (!pd->dummy_pt) in psb_mmu_alloc_pd()
174 pd->dummy_page = alloc_page(GFP_DMA32); in psb_mmu_alloc_pd()
175 if (!pd->dummy_page) in psb_mmu_alloc_pd()
179 pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), in psb_mmu_alloc_pd()
181 pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), in psb_mmu_alloc_pd()
184 pd->invalid_pde = 0; in psb_mmu_alloc_pd()
185 pd->invalid_pte = 0; in psb_mmu_alloc_pd()
188 v = kmap_local_page(pd->dummy_pt); in psb_mmu_alloc_pd()
190 v[i] = pd->invalid_pte; in psb_mmu_alloc_pd()
194 v = kmap_local_page(pd->p); in psb_mmu_alloc_pd()
196 v[i] = pd->invalid_pde; in psb_mmu_alloc_pd()
200 clear_page(kmap(pd->dummy_page)); in psb_mmu_alloc_pd()
201 kunmap(pd->dummy_page); in psb_mmu_alloc_pd()
203 pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); in psb_mmu_alloc_pd()
204 if (!pd->tables) in psb_mmu_alloc_pd()
207 pd->hw_context = -1; in psb_mmu_alloc_pd()
208 pd->pd_mask = PSB_PTE_VALID; in psb_mmu_alloc_pd()
209 pd->driver = driver; in psb_mmu_alloc_pd()
211 return pd; in psb_mmu_alloc_pd()
214 __free_page(pd->dummy_page); in psb_mmu_alloc_pd()
216 __free_page(pd->dummy_pt); in psb_mmu_alloc_pd()
218 __free_page(pd->p); in psb_mmu_alloc_pd()
220 kfree(pd); in psb_mmu_alloc_pd()
230 void psb_mmu_free_pagedir(struct psb_mmu_pd *pd) in psb_mmu_free_pagedir() argument
232 struct psb_mmu_driver *driver = pd->driver; in psb_mmu_free_pagedir()
239 if (pd->hw_context != -1) { in psb_mmu_free_pagedir()
240 PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4); in psb_mmu_free_pagedir()
248 pt = pd->tables[i]; in psb_mmu_free_pagedir()
253 vfree(pd->tables); in psb_mmu_free_pagedir()
254 __free_page(pd->dummy_page); in psb_mmu_free_pagedir()
255 __free_page(pd->dummy_pt); in psb_mmu_free_pagedir()
256 __free_page(pd->p); in psb_mmu_free_pagedir()
257 kfree(pd); in psb_mmu_free_pagedir()
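
psb_mmu_alloc_pd() above allocates the directory page plus a dummy page table and dummy page (all GFP_DMA32), points every PDE/PTE at the dummies, and unwinds through __free_page()/kfree() on failure; psb_mmu_free_pagedir() is its inverse and also clears the BIF base register when a hardware context was bound. A paired-lifetime sketch; the trap_pagefaults/invalid_type arguments are assumptions modelled on the gma500 driver and should be checked against the real prototype:

	struct psb_mmu_pd *pd;

	pd = psb_mmu_alloc_pd(driver, 1 /* trap_pagefaults, assumed */,
			      0 /* invalid_type, assumed */);
	if (!pd)
		return -ENOMEM;

	/* ... insert and remove mappings ... */

	psb_mmu_free_pagedir(pd);
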
261 static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) in psb_mmu_alloc_pt() argument
265 uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; in psb_mmu_alloc_pt()
267 spinlock_t *lock = &pd->driver->lock; in psb_mmu_alloc_pt()
287 *ptes++ = pd->invalid_pte; in psb_mmu_alloc_pt()
289 if (pd->driver->has_clflush && pd->hw_context != -1) { in psb_mmu_alloc_pt()
301 pt->pd = pd; in psb_mmu_alloc_pt()
307 static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, in psb_mmu_pt_alloc_map_lock() argument
313 spinlock_t *lock = &pd->driver->lock; in psb_mmu_pt_alloc_map_lock()
316 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
319 pt = psb_mmu_alloc_pt(pd); in psb_mmu_pt_alloc_map_lock()
324 if (pd->tables[index]) { in psb_mmu_pt_alloc_map_lock()
328 pt = pd->tables[index]; in psb_mmu_pt_alloc_map_lock()
332 v = kmap_atomic(pd->p); in psb_mmu_pt_alloc_map_lock()
333 pd->tables[index] = pt; in psb_mmu_pt_alloc_map_lock()
334 v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; in psb_mmu_pt_alloc_map_lock()
338 if (pd->hw_context != -1) { in psb_mmu_pt_alloc_map_lock()
339 psb_mmu_clflush(pd->driver, (void *)&v[index]); in psb_mmu_pt_alloc_map_lock()
340 atomic_set(&pd->driver->needs_tlbflush, 1); in psb_mmu_pt_alloc_map_lock()
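
The matches at 316-340 show the allocate-under-race pattern in psb_mmu_pt_alloc_map_lock(): the page-table slot is tested under the spinlock, the lock is dropped around the sleeping allocation, and the slot is re-tested in case another thread filled it first; the winner then publishes the new PDE and, when a hardware context is live, clflushes it and requests a TLB flush. Reduced to its shape (illustrative, not the verbatim driver code; psb_mmu_free_pt() is assumed to exist as the allocation's inverse):

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);	/* may sleep */
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			/* Lost the race: drop ours, take the winner's. */
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		/* Won the race: publish the new page table's PDE. */
		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		kunmap_atomic(v);
	}
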
347 static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd, in psb_mmu_pt_map_lock() argument
352 spinlock_t *lock = &pd->driver->lock; in psb_mmu_pt_map_lock()
355 pt = pd->tables[index]; in psb_mmu_pt_map_lock()
366 struct psb_mmu_pd *pd = pt->pd; in psb_mmu_pt_unmap_unlock() local
371 v = kmap_atomic(pd->p); in psb_mmu_pt_unmap_unlock()
372 v[pt->index] = pd->invalid_pde; in psb_mmu_pt_unmap_unlock()
373 pd->tables[pt->index] = NULL; in psb_mmu_pt_unmap_unlock()
375 if (pd->hw_context != -1) { in psb_mmu_pt_unmap_unlock()
376 psb_mmu_clflush(pd->driver, (void *)&v[pt->index]); in psb_mmu_pt_unmap_unlock()
377 atomic_set(&pd->driver->needs_tlbflush, 1); in psb_mmu_pt_unmap_unlock()
380 spin_unlock(&pd->driver->lock); in psb_mmu_pt_unmap_unlock()
384 spin_unlock(&pd->driver->lock); in psb_mmu_pt_unmap_unlock()
396 pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; in psb_mmu_invalidate_pte()
401 struct psb_mmu_pd *pd; in psb_mmu_get_default_pd() local
404 pd = driver->default_pd; in psb_mmu_get_default_pd()
407 return pd; in psb_mmu_get_default_pd()
478 static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, in psb_mmu_flush_ptes() argument
490 unsigned long clflush_add = pd->driver->clflush_add; in psb_mmu_flush_ptes()
491 unsigned long clflush_mask = pd->driver->clflush_mask; in psb_mmu_flush_ptes()
493 if (!pd->driver->has_clflush) in psb_mmu_flush_ptes()
511 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_flush_ptes()
526 void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd, in psb_mmu_remove_pfn_sequence() argument
535 down_read(&pd->driver->sem); in psb_mmu_remove_pfn_sequence()
542 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_remove_pfn_sequence()
554 if (pd->hw_context != -1) in psb_mmu_remove_pfn_sequence()
555 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_remove_pfn_sequence()
557 up_read(&pd->driver->sem); in psb_mmu_remove_pfn_sequence()
559 if (pd->hw_context != -1) in psb_mmu_remove_pfn_sequence()
560 psb_mmu_flush(pd->driver); in psb_mmu_remove_pfn_sequence()
565 void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address, in psb_mmu_remove_pages() argument
587 down_read(&pd->driver->sem); in psb_mmu_remove_pages()
598 pt = psb_mmu_pt_map_lock(pd, addr); in psb_mmu_remove_pages()
611 if (pd->hw_context != -1) in psb_mmu_remove_pages()
612 psb_mmu_flush_ptes(pd, f_address, num_pages, in psb_mmu_remove_pages()
615 up_read(&pd->driver->sem); in psb_mmu_remove_pages()
617 if (pd->hw_context != -1) in psb_mmu_remove_pages()
618 psb_mmu_flush(pd->driver); in psb_mmu_remove_pages()
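
Lines 535-560 and 587-618 repeat a single skeleton, and the two insert paths below follow it too: take the driver semaphore read-side, walk the range with psb_mmu_pt_map_lock()/psb_mmu_pt_alloc_map_lock(), rewrite PTEs, then clflush the touched PTEs and flush the MMU only when the directory is bound to a hardware context. The shape, reconstructed from the matches (illustrative):

	down_read(&pd->driver->sem);

	/* ... per-page-table loop: map, rewrite PTEs, unmap ... */

	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
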
621 int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, in psb_mmu_insert_pfn_sequence() argument
633 down_read(&pd->driver->sem); in psb_mmu_insert_pfn_sequence()
640 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pfn_sequence()
656 if (pd->hw_context != -1) in psb_mmu_insert_pfn_sequence()
657 psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); in psb_mmu_insert_pfn_sequence()
659 up_read(&pd->driver->sem); in psb_mmu_insert_pfn_sequence()
661 if (pd->hw_context != -1) in psb_mmu_insert_pfn_sequence()
662 psb_mmu_flush(pd->driver); in psb_mmu_insert_pfn_sequence()
667 int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, in psb_mmu_insert_pages() argument
695 down_read(&pd->driver->sem); in psb_mmu_insert_pages()
704 pt = psb_mmu_pt_alloc_map_lock(pd, addr); in psb_mmu_insert_pages()
722 if (pd->hw_context != -1) in psb_mmu_insert_pages()
723 psb_mmu_flush_ptes(pd, f_address, num_pages, in psb_mmu_insert_pages()
726 up_read(&pd->driver->sem); in psb_mmu_insert_pages()
728 if (pd->hw_context != -1) in psb_mmu_insert_pages()
729 psb_mmu_flush(pd->driver); in psb_mmu_insert_pages()
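
A hypothetical caller of the insert path; only pd and start_pfn are confirmed by the prototype fragment at line 621, so the remaining arguments (address, count, type) are assumptions to verify against the real header:

	int ret;

	/* Map num_pages contiguous pfns at gpu_addr in the default PD. */
	ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(driver),
					  start_pfn, gpu_addr, num_pages,
					  0 /* type, assumed */);
	if (ret)
		return ret;
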
734 int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, in psb_mmu_virtual_to_pfn() argument
740 spinlock_t *lock = &pd->driver->lock; in psb_mmu_virtual_to_pfn()
742 down_read(&pd->driver->sem); in psb_mmu_virtual_to_pfn()
743 pt = psb_mmu_pt_map_lock(pd, virtual); in psb_mmu_virtual_to_pfn()
748 v = kmap_atomic(pd->p); in psb_mmu_virtual_to_pfn()
753 if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || in psb_mmu_virtual_to_pfn()
754 !(pd->invalid_pte & PSB_PTE_VALID)) { in psb_mmu_virtual_to_pfn()
759 *pfn = pd->invalid_pte >> PAGE_SHIFT; in psb_mmu_virtual_to_pfn()
771 up_read(&pd->driver->sem); in psb_mmu_virtual_to_pfn()
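
Finally, psb_mmu_virtual_to_pfn() is the reverse lookup: it walks the directory under the read semaphore, falls back to the invalid PTE for unmapped addresses, and fails when that fallback is not itself a valid mapping. A lookup sketch, where gpu_addr is a hypothetical already-mapped GPU-virtual address:

	unsigned long pfn;

	if (psb_mmu_virtual_to_pfn(pd, gpu_addr, &pfn) == 0)
		pr_info("0x%08x maps to pfn 0x%lx\n", gpu_addr, pfn);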