// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

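/*
 * One entry per DVMA page.  A nonzero value records the length (in bytes)
 * of the allocation whose bus address starts at that page.
 */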
static unsigned long *iommu_use;

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr)		(iommu_use[dvma_index(baddr)])

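/* A contiguous range of free DVMA space. */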
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

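/*
 * hole_list holds the currently free DVMA ranges, hole_cache holds spare
 * hole descriptors, and initholes provides the initial descriptor pool.
 */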
static struct list_head hole_list;
static struct list_head hole_cache;
static struct hole initholes[64];

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

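/* Debug helper: dump every in-use DVMA entry and the running statistics. */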
static void print_use(void)
{
	int i;
	int j = 0;

	pr_info("dvma entry usage:\n");

	for (i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
		if (!iommu_use[i])
			continue;

		j++;

		pr_info("dvma entry: %08x len %08lx\n",
			(i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]);
	}

	pr_info("%d entries in use total\n", j);

	pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
		dvma_free_bytes);
}

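/* Debug helper: dump each hole on the given list, skipping empty entries. */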
static void print_holes(struct list_head *holes)
{
	struct list_head *cur;
	struct hole *hole;

	pr_info("listing dvma holes\n");
	list_for_each(cur, holes) {
		hole = list_entry(cur, struct hole, list);

		if ((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		pr_info("hole: start %08lx end %08lx size %08lx\n",
			hole->start, hole->end, hole->size);
	}

	pr_info("end of hole listing...\n");
}
#endif /* DVMA_DEBUG */

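/*
 * Try to merge adjacent holes on hole_list into single entries, returning
 * the freed-up descriptors to hole_cache.  Returns the number of
 * descriptors recovered.
 */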
static inline int refill(void)
{
	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (!prev) {
			prev = hole;
			continue;
		}

		if (hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_move(&(prev->list), &hole_cache);
			ret++;
		}
	}

	return ret;
}

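/*
 * Take a spare hole descriptor from hole_cache, refilling the cache by
 * merging holes if it has run dry.  BUGs if no descriptor can be found.
 */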
static inline struct hole *rmcache(void)
{
	struct hole *ret;

	if (list_empty(&hole_cache)) {
		if (!refill()) {
			pr_crit("out of dvma hole cache!\n");
			BUG();
		}
	}

	ret = list_entry(hole_cache.next, struct hole, list);
	list_del(&(ret->list));

	return ret;
}

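/*
 * Allocate len bytes of DVMA bus address space with the given alignment,
 * carving the allocation off the end of the first hole large enough to
 * hold it.  Records the allocation length in iommu_use.
 */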
static inline unsigned long get_baddr(int len, unsigned long align)
{
	struct list_head *cur;
	struct hole *hole;

	if (list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		pr_crit("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		if (align > DVMA_PAGE_SIZE)
			newlen = len + ((hole->end - len) & (align-1));
		else
			newlen = len;

		if (hole->size > newlen) {
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if (hole->size == newlen) {
			list_move(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}
	}

	pr_crit("unable to find dvma hole!\n");
	BUG();
	return 0;
}

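/*
 * Release the allocation at bus address baddr: tear down the IOMMU
 * mapping and return the range to hole_list, either by extending an
 * adjacent hole or by inserting a fresh hole descriptor.
 */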
static inline int free_baddr(unsigned long baddr)
{
	unsigned long len;
	struct hole *hole;
	struct list_head *cur;

	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if (hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}
	}

	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

	list_add(&(hole->list), cur);

	return 0;
}

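/*
 * Set up the hole lists, seed hole_list with a single hole covering the
 * whole DVMA region, allocate the per-page usage array, clear the DVMA
 * range in the IOMMU, and let the platform code finish its own setup.
 */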
void __init dvma_init(void)
{
	struct hole *hole;
	int i;

	INIT_LIST_HEAD(&hole_list);
	INIT_LIST_HEAD(&hole_cache);

	/* prepare the hole cache */
	for (i = 0; i < 64; i++)
		list_add(&(initholes[i].list), &hole_cache);

	hole = rmcache();
	hole->start = DVMA_START;
	hole->end = DVMA_END;
	hole->size = DVMA_SIZE;

	list_add(&(hole->list), &hole_list);

	iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
				   SMP_CACHE_BYTES);
	if (!iommu_use)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));

	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

	sun3_dvma_init();
}

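/*
 * Map len bytes starting at kernel address kaddr into DVMA space with the
 * requested alignment.  Returns the resulting bus address (plus the
 * original offset within the page), or 0 on a bad request.
 */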
unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{
	unsigned long baddr;
	unsigned long off;

	if (!len)
		len = 0x800;

	if (!kaddr || !len)
		return 0;

	pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
	off = kaddr & ~DVMA_PAGE_MASK;
	kaddr &= PAGE_MASK;
	len += off;
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if (align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);

	if (!dvma_map_iommu(kaddr, baddr, len))
		return (baddr + off);

	pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
		len);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dvma_map_align);

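/*
 * Undo a dvma_map_align() mapping.  A VME-style bus address (one without
 * the 0xf00000 bits set) is folded back into the DVMA window before the
 * range is released.
 */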
void dvma_unmap(void *baddr)
{
	unsigned long addr;

	addr = (unsigned long)baddr;
	/* check if this is a vme mapping */
	if (!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);
}
EXPORT_SYMBOL(dvma_unmap);

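/*
 * Allocate len bytes of DVMA-capable memory with the given alignment:
 * grab pages, map them into DVMA space, then map the DVMA range into the
 * kernel's address space.  Returns the CPU-visible DVMA virtual address,
 * or NULL on failure.
 */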
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
	unsigned long kaddr;
	unsigned long baddr;
	unsigned long vaddr;

	if (!len)
		return NULL;

	pr_debug("dvma_malloc request %lx bytes\n", len);
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if ((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
		return NULL;

	if ((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	vaddr = dvma_btov(baddr);

	if (dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap((void *)baddr);
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr,
		 baddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(dvma_malloc_align);

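/*
 * dvma_free() is a no-op: releasing DVMA allocations is not implemented,
 * so memory obtained from dvma_malloc_align() stays mapped.
 */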
void dvma_free(void *vaddr)
{
}
EXPORT_SYMBOL(dvma_free);