1  /******************************************************************************
2   * grant_table.c
3   *
4   * Granting foreign access to our memory reservation.
5   *
6   * Copyright (c) 2005-2006, Christopher Clark
7   * Copyright (c) 2004-2005, K A Fraser
8   *
9   * This program is free software; you can redistribute it and/or
10   * modify it under the terms of the GNU General Public License version 2
11   * as published by the Free Software Foundation; or, when distributed
12   * separately from the Linux kernel or incorporated into other
13   * software packages, subject to the following license:
14   *
15   * Permission is hereby granted, free of charge, to any person obtaining a copy
16   * of this source file (the "Software"), to deal in the Software without
17   * restriction, including without limitation the rights to use, copy, modify,
18   * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19   * and to permit persons to whom the Software is furnished to do so, subject to
20   * the following conditions:
21   *
22   * The above copyright notice and this permission notice shall be included in
23   * all copies or substantial portions of the Software.
24   *
25   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28   * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29   * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30   * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31   * IN THE SOFTWARE.
32   */
33  
34  #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
35  
36  #include <linux/bitmap.h>
37  #include <linux/memblock.h>
38  #include <linux/sched.h>
39  #include <linux/mm.h>
40  #include <linux/slab.h>
41  #include <linux/vmalloc.h>
42  #include <linux/uaccess.h>
43  #include <linux/io.h>
44  #include <linux/delay.h>
45  #include <linux/hardirq.h>
46  #include <linux/workqueue.h>
47  #include <linux/ratelimit.h>
48  #include <linux/moduleparam.h>
49  #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
50  #include <linux/dma-mapping.h>
51  #endif
52  
53  #include <xen/xen.h>
54  #include <xen/interface/xen.h>
55  #include <xen/page.h>
56  #include <xen/grant_table.h>
57  #include <xen/interface/memory.h>
58  #include <xen/hvc-console.h>
59  #include <xen/swiotlb-xen.h>
60  #include <xen/balloon.h>
61  #ifdef CONFIG_X86
62  #include <asm/xen/cpuid.h>
63  #endif
64  #include <xen/mem-reservation.h>
65  #include <asm/xen/hypercall.h>
66  #include <asm/xen/interface.h>
67  
68  #include <asm/sync_bitops.h>
69  
70  #define GNTTAB_LIST_END 0xffffffff
71  
72  static grant_ref_t **gnttab_list;
73  static unsigned int nr_grant_frames;
74  
75  /*
76   * Handling of free grants:
77   *
78   * Free grants are in a simple list anchored in gnttab_free_head. They are
79   * linked by grant ref, the last element contains GNTTAB_LIST_END. The number
80   * of free entries is stored in gnttab_free_count.
81   * Additionally there is a bitmap of free entries anchored in
82   * gnttab_free_bitmap. It is used to simplify allocation of multiple
83   * consecutive grants, which is needed e.g. to support virtio.
84   * gnttab_last_free is used to add free entries of new frames at the end of
85   * the free list.
86   * gnttab_free_tail_ptr specifies the variable which references the start
87   * of consecutive free grants ending with gnttab_last_free. This pointer is
88   * updated in a rather defensive way, in order to avoid performance hits in
89   * hot paths.
90   * All those variables are protected by gnttab_list_lock.
91   */
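/*
 * A small worked example of the layout described above (the concrete numbers
 * are illustrative only): with grants 9..16 free and consecutive,
 * gnttab_free_head == 9, gnttab_entry(i) == i + 1 for i in 9..15,
 * gnttab_entry(16) == GNTTAB_LIST_END, gnttab_last_free == 16,
 * gnttab_free_count == 8, and bits 9..16 are set in gnttab_free_bitmap.
 */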
92  static int gnttab_free_count;
93  static unsigned int gnttab_size;
94  static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
95  static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
96  static grant_ref_t *gnttab_free_tail_ptr;
97  static unsigned long *gnttab_free_bitmap;
98  static DEFINE_SPINLOCK(gnttab_list_lock);
99  
100  struct grant_frames xen_auto_xlat_grant_frames;
101  static unsigned int xen_gnttab_version;
102  module_param_named(version, xen_gnttab_version, uint, 0);
103  
104  static union {
105  	struct grant_entry_v1 *v1;
106  	union grant_entry_v2 *v2;
107  	void *addr;
108  } gnttab_shared;
109  
110  /* Structure of function pointers for the grant table (one instance per interface version). */
111  struct gnttab_ops {
112  	/*
113  	 * Version of the grant interface.
114  	 */
115  	unsigned int version;
116  	/*
117  	 * Grant refs per grant frame.
118  	 */
119  	unsigned int grefs_per_grant_frame;
120  	/*
121  	 * Map a list of frames for storing grant entries. The frames parameter
122  	 * holds the grant table frame addresses obtained while the table is
123  	 * being set up, and nr_gframes is the number of frames to map.
124  	 * Returns GNTST_okay on success and a negative value on failure.
125  	 */
126  	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
127  	/*
128  	 * Unmap the list of frames mapped by map_frames (for v2 this includes
129  	 * the grant entry status frames).
130  	 */
131  	void (*unmap_frames)(void);
132  	/*
133  	 * Introduce a valid entry into the grant table, granting the frame of
134  	 * this grant entry to a domain for access. The ref parameter is the
135  	 * reference of the introduced grant entry, domid is the id of the
136  	 * domain being granted access, frame is the page frame to be granted,
137  	 * and flags are the status flags to set in the entry.
138  	 */
139  	void (*update_entry)(grant_ref_t ref, domid_t domid,
140  			     unsigned long frame, unsigned flags);
141  	/*
142  	 * Stop granting access through a grant entry. The ref parameter is the
143  	 * reference of the grant entry whose access is to be revoked.
144  	 * If the grant entry is currently mapped for reading or writing, return
145  	 * failure (== 0) directly and don't tear down the grant access.
146  	 * Otherwise, stop grant access for this entry and return success (== 1).
147  	 */
148  	int (*end_foreign_access_ref)(grant_ref_t ref);
149  	/*
150  	 * Read the frame number related to a given grant reference.
151  	 */
152  	unsigned long (*read_frame)(grant_ref_t ref);
153  };
154  
155  struct unmap_refs_callback_data {
156  	struct completion completion;
157  	int result;
158  };
159  
160  static const struct gnttab_ops *gnttab_interface;
161  
162  /* This reflects the status of grant entries (v2 only), so it acts as a global value. */
163  static grant_status_t *grstatus;
164  
165  static struct gnttab_free_callback *gnttab_free_callback_list;
166  
167  static int gnttab_expand(unsigned int req_entries);
168  
169  #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
170  #define SPP (PAGE_SIZE / sizeof(grant_status_t))
171  
172  static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
173  {
174  	return &gnttab_list[(entry) / RPP][(entry) % RPP];
175  }
176  /* This can be used as an l-value */
177  #define gnttab_entry(entry) (*__gnttab_entry(entry))
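/*
 * Illustrative example of the two-level lookup, assuming 4 KiB pages
 * (RPP == 1024): gnttab_entry(4100) resolves to gnttab_list[4][4], i.e.
 * slot 4 in the fifth page of the free-list array.
 */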
178  
179  static int get_free_entries(unsigned count)
180  {
181  	unsigned long flags;
182  	int ref, rc = 0;
183  	grant_ref_t head;
184  
185  	spin_lock_irqsave(&gnttab_list_lock, flags);
186  
187  	if ((gnttab_free_count < count) &&
188  	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
189  		spin_unlock_irqrestore(&gnttab_list_lock, flags);
190  		return rc;
191  	}
192  
193  	ref = head = gnttab_free_head;
194  	gnttab_free_count -= count;
195  	while (count--) {
196  		bitmap_clear(gnttab_free_bitmap, head, 1);
197  		if (gnttab_free_tail_ptr == __gnttab_entry(head))
198  			gnttab_free_tail_ptr = &gnttab_free_head;
199  		if (count)
200  			head = gnttab_entry(head);
201  	}
202  	gnttab_free_head = gnttab_entry(head);
203  	gnttab_entry(head) = GNTTAB_LIST_END;
204  
205  	if (!gnttab_free_count) {
206  		gnttab_last_free = GNTTAB_LIST_END;
207  		gnttab_free_tail_ptr = NULL;
208  	}
209  
210  	spin_unlock_irqrestore(&gnttab_list_lock, flags);
211  
212  	return ref;
213  }
214  
215  static int get_seq_entry_count(void)
216  {
217  	if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
218  	    *gnttab_free_tail_ptr == GNTTAB_LIST_END)
219  		return 0;
220  
221  	return gnttab_last_free - *gnttab_free_tail_ptr + 1;
222  }
223  
224  /* Rebuilds the free grant list and tries to find count consecutive entries. */
225  static int get_free_seq(unsigned int count)
226  {
227  	int ret = -ENOSPC;
228  	unsigned int from, to;
229  	grant_ref_t *last;
230  
231  	gnttab_free_tail_ptr = &gnttab_free_head;
232  	last = &gnttab_free_head;
233  
234  	for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
235  	     from < gnttab_size;
236  	     from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
237  		to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
238  					from + 1);
239  		if (ret < 0 && to - from >= count) {
240  			ret = from;
241  			bitmap_clear(gnttab_free_bitmap, ret, count);
242  			from += count;
243  			gnttab_free_count -= count;
244  			if (from == to)
245  				continue;
246  		}
247  
248  		/*
249  		 * Recreate the free list in order to have it properly sorted.
250  		 * This is needed to make sure that the free tail has the maximum
251  		 * possible size.
252  		 */
253  		while (from < to) {
254  			*last = from;
255  			last = __gnttab_entry(from);
256  			gnttab_last_free = from;
257  			from++;
258  		}
259  		if (to < gnttab_size)
260  			gnttab_free_tail_ptr = __gnttab_entry(to - 1);
261  	}
262  
263  	*last = GNTTAB_LIST_END;
264  	if (gnttab_last_free != gnttab_size - 1)
265  		gnttab_free_tail_ptr = NULL;
266  
267  	return ret;
268  }
269  
270  static int get_free_entries_seq(unsigned int count)
271  {
272  	unsigned long flags;
273  	int ret = 0;
274  
275  	spin_lock_irqsave(&gnttab_list_lock, flags);
276  
277  	if (gnttab_free_count < count) {
278  		ret = gnttab_expand(count - gnttab_free_count);
279  		if (ret < 0)
280  			goto out;
281  	}
282  
283  	if (get_seq_entry_count() < count) {
284  		ret = get_free_seq(count);
285  		if (ret >= 0)
286  			goto out;
287  		ret = gnttab_expand(count - get_seq_entry_count());
288  		if (ret < 0)
289  			goto out;
290  	}
291  
292  	ret = *gnttab_free_tail_ptr;
293  	*gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
294  	gnttab_free_count -= count;
295  	if (!gnttab_free_count)
296  		gnttab_free_tail_ptr = NULL;
297  	bitmap_clear(gnttab_free_bitmap, ret, count);
298  
299   out:
300  	spin_unlock_irqrestore(&gnttab_list_lock, flags);
301  
302  	return ret;
303  }
304  
305  static void do_free_callbacks(void)
306  {
307  	struct gnttab_free_callback *callback, *next;
308  
309  	callback = gnttab_free_callback_list;
310  	gnttab_free_callback_list = NULL;
311  
312  	while (callback != NULL) {
313  		next = callback->next;
314  		if (gnttab_free_count >= callback->count) {
315  			callback->next = NULL;
316  			callback->fn(callback->arg);
317  		} else {
318  			callback->next = gnttab_free_callback_list;
319  			gnttab_free_callback_list = callback;
320  		}
321  		callback = next;
322  	}
323  }
324  
325  static inline void check_free_callbacks(void)
326  {
327  	if (unlikely(gnttab_free_callback_list))
328  		do_free_callbacks();
329  }
330  
331  static void put_free_entry_locked(grant_ref_t ref)
332  {
333  	if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
334  		return;
335  
336  	gnttab_entry(ref) = gnttab_free_head;
337  	gnttab_free_head = ref;
338  	if (!gnttab_free_count)
339  		gnttab_last_free = ref;
340  	if (gnttab_free_tail_ptr == &gnttab_free_head)
341  		gnttab_free_tail_ptr = __gnttab_entry(ref);
342  	gnttab_free_count++;
343  	bitmap_set(gnttab_free_bitmap, ref, 1);
344  }
345  
346  static void put_free_entry(grant_ref_t ref)
347  {
348  	unsigned long flags;
349  
350  	spin_lock_irqsave(&gnttab_list_lock, flags);
351  	put_free_entry_locked(ref);
352  	check_free_callbacks();
353  	spin_unlock_irqrestore(&gnttab_list_lock, flags);
354  }
355  
356  static void gnttab_set_free(unsigned int start, unsigned int n)
357  {
358  	unsigned int i;
359  
360  	for (i = start; i < start + n - 1; i++)
361  		gnttab_entry(i) = i + 1;
362  
363  	gnttab_entry(i) = GNTTAB_LIST_END;
364  	if (!gnttab_free_count) {
365  		gnttab_free_head = start;
366  		gnttab_free_tail_ptr = &gnttab_free_head;
367  	} else {
368  		gnttab_entry(gnttab_last_free) = start;
369  	}
370  	gnttab_free_count += n;
371  	gnttab_last_free = i;
372  
373  	bitmap_set(gnttab_free_bitmap, start, n);
374  }
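/*
 * For instance (hypothetical numbers), gnttab_set_free(8, 4) links grants
 * 8 -> 9 -> 10 -> 11 -> GNTTAB_LIST_END, appends them after gnttab_last_free
 * (or makes 8 the new head if the list was empty), sets bits 8..11 in
 * gnttab_free_bitmap and adds 4 to gnttab_free_count.
 */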
375  
376  /*
377   * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
378   * Introducing a valid entry into the grant table:
379   *  1. Write ent->domid.
380   *  2. Write ent->frame: Frame to which access is permitted.
381   *  3. Write memory barrier (WMB).
382   *  4. Write ent->flags, inc. valid type.
383   */
384  static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
385  				   unsigned long frame, unsigned flags)
386  {
387  	gnttab_shared.v1[ref].domid = domid;
388  	gnttab_shared.v1[ref].frame = frame;
389  	wmb();
390  	gnttab_shared.v1[ref].flags = flags;
391  }
392  
393  static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
394  				   unsigned long frame, unsigned int flags)
395  {
396  	gnttab_shared.v2[ref].hdr.domid = domid;
397  	gnttab_shared.v2[ref].full_page.frame = frame;
398  	wmb();	/* Hypervisor concurrent accesses. */
399  	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
400  }
401  
402  /*
403   * Public grant-issuing interface functions
404   */
405  void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
406  				     unsigned long frame, int readonly)
407  {
408  	gnttab_interface->update_entry(ref, domid, frame,
409  			   GTF_permit_access | (readonly ? GTF_readonly : 0));
410  }
411  EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
412  
413  int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
414  				int readonly)
415  {
416  	int ref;
417  
418  	ref = get_free_entries(1);
419  	if (unlikely(ref < 0))
420  		return -ENOSPC;
421  
422  	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
423  
424  	return ref;
425  }
426  EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
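/*
 * Hedged usage sketch for a frontend granting one of its pages read-only to
 * a backend domain; backend_domid and the abbreviated error handling are
 * assumptions made for illustration only:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	int ref = gnttab_grant_foreign_access(backend_domid,
 *					      xen_page_to_gfn(page), 1);
 *
 *	if (ref < 0)
 *		return ref;	// -ENOSPC: no free grant entries
 *	// communicate ref to the backend (e.g. via xenstore or a ring),
 *	// and later revoke it with gnttab_end_foreign_access(ref, page).
 */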
427  
428  static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
429  {
430  	u16 *pflags = &gnttab_shared.v1[ref].flags;
431  	u16 flags;
432  
433  	flags = *pflags;
434  	do {
435  		if (flags & (GTF_reading|GTF_writing))
436  			return 0;
437  	} while (!sync_try_cmpxchg(pflags, &flags, 0));
438  
439  	return 1;
440  }
441  
442  static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
443  {
444  	gnttab_shared.v2[ref].hdr.flags = 0;
445  	mb();	/* Concurrent access by hypervisor. */
446  	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
447  		return 0;
448  	} else {
449  		/*
450  		 * The read of grstatus needs to have acquire semantics.
451  		 *  On x86, reads already have that, and we just need to
452  		 * protect against compiler reorderings.
453  		 * On other architectures we may need a full barrier.
454  		 */
455  #ifdef CONFIG_X86
456  		barrier();
457  #else
458  		mb();
459  #endif
460  	}
461  
462  	return 1;
463  }
464  
465  static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
466  {
467  	return gnttab_interface->end_foreign_access_ref(ref);
468  }
469  
470  int gnttab_end_foreign_access_ref(grant_ref_t ref)
471  {
472  	if (_gnttab_end_foreign_access_ref(ref))
473  		return 1;
474  	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
475  	return 0;
476  }
477  EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
478  
479  static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
480  {
481  	return gnttab_shared.v1[ref].frame;
482  }
483  
484  static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
485  {
486  	return gnttab_shared.v2[ref].full_page.frame;
487  }
488  
489  struct deferred_entry {
490  	struct list_head list;
491  	grant_ref_t ref;
492  	uint16_t warn_delay;
493  	struct page *page;
494  };
495  static LIST_HEAD(deferred_list);
496  static void gnttab_handle_deferred(struct timer_list *);
497  static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
498  
499  static atomic64_t deferred_count;
500  static atomic64_t leaked_count;
501  static unsigned int free_per_iteration = 10;
502  module_param(free_per_iteration, uint, 0600);
503  
504  static void gnttab_handle_deferred(struct timer_list *unused)
505  {
506  	unsigned int nr = READ_ONCE(free_per_iteration);
507  	const bool ignore_limit = nr == 0;
508  	struct deferred_entry *first = NULL;
509  	unsigned long flags;
510  	size_t freed = 0;
511  
512  	spin_lock_irqsave(&gnttab_list_lock, flags);
513  	while ((ignore_limit || nr--) && !list_empty(&deferred_list)) {
514  		struct deferred_entry *entry
515  			= list_first_entry(&deferred_list,
516  					   struct deferred_entry, list);
517  
518  		if (entry == first)
519  			break;
520  		list_del(&entry->list);
521  		spin_unlock_irqrestore(&gnttab_list_lock, flags);
522  		if (_gnttab_end_foreign_access_ref(entry->ref)) {
523  			uint64_t ret = atomic64_dec_return(&deferred_count);
524  
525  			put_free_entry(entry->ref);
526  			pr_debug("freeing g.e. %#x (pfn %#lx), %llu remaining\n",
527  				 entry->ref, page_to_pfn(entry->page),
528  				 (unsigned long long)ret);
529  			put_page(entry->page);
530  			freed++;
531  			kfree(entry);
532  			entry = NULL;
533  		} else {
534  			if (!--entry->warn_delay)
535  				pr_info("g.e. %#x still pending\n", entry->ref);
536  			if (!first)
537  				first = entry;
538  		}
539  		spin_lock_irqsave(&gnttab_list_lock, flags);
540  		if (entry)
541  			list_add_tail(&entry->list, &deferred_list);
542  	}
543  	if (list_empty(&deferred_list))
544  		WARN_ON(atomic64_read(&deferred_count));
545  	else if (!timer_pending(&deferred_timer)) {
546  		deferred_timer.expires = jiffies + HZ;
547  		add_timer(&deferred_timer);
548  	}
549  	spin_unlock_irqrestore(&gnttab_list_lock, flags);
550  	pr_debug("Freed %zu references", freed);
551  }
552  
553  static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
554  {
555  	struct deferred_entry *entry;
556  	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
557  	uint64_t leaked, deferred;
558  
559  	entry = kmalloc(sizeof(*entry), gfp);
560  	if (!page) {
561  		unsigned long gfn = gnttab_interface->read_frame(ref);
562  
563  		page = pfn_to_page(gfn_to_pfn(gfn));
564  		get_page(page);
565  	}
566  
567  	if (entry) {
568  		unsigned long flags;
569  
570  		entry->ref = ref;
571  		entry->page = page;
572  		entry->warn_delay = 60;
573  		spin_lock_irqsave(&gnttab_list_lock, flags);
574  		list_add_tail(&entry->list, &deferred_list);
575  		if (!timer_pending(&deferred_timer)) {
576  			deferred_timer.expires = jiffies + HZ;
577  			add_timer(&deferred_timer);
578  		}
579  		spin_unlock_irqrestore(&gnttab_list_lock, flags);
580  		deferred = atomic64_inc_return(&deferred_count);
581  		leaked = atomic64_read(&leaked_count);
582  		pr_debug("deferring g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
583  			 ref, page ? page_to_pfn(page) : -1, deferred, leaked);
584  	} else {
585  		deferred = atomic64_read(&deferred_count);
586  		leaked = atomic64_inc_return(&leaked_count);
587  		pr_warn("leaking g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
588  			ref, page ? page_to_pfn(page) : -1, deferred, leaked);
589  	}
590  }
591  
592  int gnttab_try_end_foreign_access(grant_ref_t ref)
593  {
594  	int ret = _gnttab_end_foreign_access_ref(ref);
595  
596  	if (ret)
597  		put_free_entry(ref);
598  
599  	return ret;
600  }
601  EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
602  
603  void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
604  {
605  	if (gnttab_try_end_foreign_access(ref)) {
606  		if (page)
607  			put_page(page);
608  	} else
609  		gnttab_add_deferred(ref, page);
610  }
611  EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
612  
613  void gnttab_free_grant_reference(grant_ref_t ref)
614  {
615  	put_free_entry(ref);
616  }
617  EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
618  
619  void gnttab_free_grant_references(grant_ref_t head)
620  {
621  	grant_ref_t ref;
622  	unsigned long flags;
623  
624  	spin_lock_irqsave(&gnttab_list_lock, flags);
625  	while (head != GNTTAB_LIST_END) {
626  		ref = gnttab_entry(head);
627  		put_free_entry_locked(head);
628  		head = ref;
629  	}
630  	check_free_callbacks();
631  	spin_unlock_irqrestore(&gnttab_list_lock, flags);
632  }
633  EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
634  
635  void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
636  {
637  	unsigned long flags;
638  	unsigned int i;
639  
640  	spin_lock_irqsave(&gnttab_list_lock, flags);
641  	for (i = count; i > 0; i--)
642  		put_free_entry_locked(head + i - 1);
643  	check_free_callbacks();
644  	spin_unlock_irqrestore(&gnttab_list_lock, flags);
645  }
646  EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);
647  
648  int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
649  {
650  	int h = get_free_entries(count);
651  
652  	if (h < 0)
653  		return -ENOSPC;
654  
655  	*head = h;
656  
657  	return 0;
658  }
659  EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
660  
661  int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
662  {
663  	int h;
664  
665  	if (count == 1)
666  		h = get_free_entries(1);
667  	else
668  		h = get_free_entries_seq(count);
669  
670  	if (h < 0)
671  		return -ENOSPC;
672  
673  	*first = h;
674  
675  	return 0;
676  }
677  EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);
678  
679  int gnttab_empty_grant_references(const grant_ref_t *private_head)
680  {
681  	return (*private_head == GNTTAB_LIST_END);
682  }
683  EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
684  
685  int gnttab_claim_grant_reference(grant_ref_t *private_head)
686  {
687  	grant_ref_t g = *private_head;
688  	if (unlikely(g == GNTTAB_LIST_END))
689  		return -ENOSPC;
690  	*private_head = gnttab_entry(g);
691  	return g;
692  }
693  EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
694  
695  void gnttab_release_grant_reference(grant_ref_t *private_head,
696  				    grant_ref_t release)
697  {
698  	gnttab_entry(release) = *private_head;
699  	*private_head = release;
700  }
701  EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
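/*
 * Sketch of the batch allocation pattern built on the helpers above (local
 * variable names are illustrative, error handling abbreviated):
 *
 *	grant_ref_t head, ref;
 *
 *	if (gnttab_alloc_grant_references(16, &head))
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&head);	// take one entry
 *	...
 *	gnttab_release_grant_reference(&head, ref);	// return it unused
 *	gnttab_free_grant_references(head);		// free the whole batch
 */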
702  
703  void gnttab_request_free_callback(struct gnttab_free_callback *callback,
704  				  void (*fn)(void *), void *arg, u16 count)
705  {
706  	unsigned long flags;
707  	struct gnttab_free_callback *cb;
708  
709  	spin_lock_irqsave(&gnttab_list_lock, flags);
710  
711  	/* Check if the callback is already on the list */
712  	cb = gnttab_free_callback_list;
713  	while (cb) {
714  		if (cb == callback)
715  			goto out;
716  		cb = cb->next;
717  	}
718  
719  	callback->fn = fn;
720  	callback->arg = arg;
721  	callback->count = count;
722  	callback->next = gnttab_free_callback_list;
723  	gnttab_free_callback_list = callback;
724  	check_free_callbacks();
725  out:
726  	spin_unlock_irqrestore(&gnttab_list_lock, flags);
727  }
728  EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
729  
730  void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
731  {
732  	struct gnttab_free_callback **pcb;
733  	unsigned long flags;
734  
735  	spin_lock_irqsave(&gnttab_list_lock, flags);
736  	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
737  		if (*pcb == callback) {
738  			*pcb = callback->next;
739  			break;
740  		}
741  	}
742  	spin_unlock_irqrestore(&gnttab_list_lock, flags);
743  }
744  EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
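/*
 * Minimal sketch of the free-callback mechanism, assuming a driver-defined
 * my_refill() handler (the name is hypothetical): the callback fires once at
 * least the requested number of grant entries are free again.
 *
 *	static struct gnttab_free_callback cb;
 *
 *	static void my_refill(void *arg)
 *	{
 *		// retry the allocation that previously returned -ENOSPC
 *	}
 *	...
 *	gnttab_request_free_callback(&cb, my_refill, dev, 16);
 *	...
 *	gnttab_cancel_free_callback(&cb);	// e.g. on device teardown
 */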
745  
746  static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
747  {
748  	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
749  	       align;
750  }
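/*
 * Example (assuming 4 KiB pages and the v1 layout, i.e. 512 grants per grant
 * frame and RPP == 1024): gnttab_frames(4, RPP) == 2, meaning four grant
 * frames need two pages of grant_ref_t free-list storage.
 */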
751  
752  static int grow_gnttab_list(unsigned int more_frames)
753  {
754  	unsigned int new_nr_grant_frames, extra_entries, i;
755  	unsigned int nr_glist_frames, new_nr_glist_frames;
756  	unsigned int grefs_per_frame;
757  
758  	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
759  
760  	new_nr_grant_frames = nr_grant_frames + more_frames;
761  	extra_entries = more_frames * grefs_per_frame;
762  
763  	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
764  	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
765  	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
766  		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
767  		if (!gnttab_list[i])
768  			goto grow_nomem;
769  	}
770  
771  	gnttab_set_free(gnttab_size, extra_entries);
772  
773  	if (!gnttab_free_tail_ptr)
774  		gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);
775  
776  	nr_grant_frames = new_nr_grant_frames;
777  	gnttab_size += extra_entries;
778  
779  	check_free_callbacks();
780  
781  	return 0;
782  
783  grow_nomem:
784  	while (i-- > nr_glist_frames)
785  		free_page((unsigned long) gnttab_list[i]);
786  	return -ENOMEM;
787  }
788  
789  static unsigned int __max_nr_grant_frames(void)
790  {
791  	struct gnttab_query_size query;
792  	int rc;
793  
794  	query.dom = DOMID_SELF;
795  
796  	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
797  	if ((rc < 0) || (query.status != GNTST_okay))
798  		return 4; /* Legacy max supported number of frames */
799  
800  	return query.max_nr_frames;
801  }
802  
803  unsigned int gnttab_max_grant_frames(void)
804  {
805  	unsigned int xen_max = __max_nr_grant_frames();
806  	static unsigned int boot_max_nr_grant_frames;
807  
808  	/* First time, initialize it properly. */
809  	if (!boot_max_nr_grant_frames)
810  		boot_max_nr_grant_frames = __max_nr_grant_frames();
811  
812  	if (xen_max > boot_max_nr_grant_frames)
813  		return boot_max_nr_grant_frames;
814  	return xen_max;
815  }
816  EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
817  
818  int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
819  {
820  	xen_pfn_t *pfn;
821  	unsigned int max_nr_gframes = __max_nr_grant_frames();
822  	unsigned int i;
823  	void *vaddr;
824  
825  	if (xen_auto_xlat_grant_frames.count)
826  		return -EINVAL;
827  
828  	vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
829  	if (vaddr == NULL) {
830  		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
831  			&addr);
832  		return -ENOMEM;
833  	}
834  	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
835  	if (!pfn) {
836  		memunmap(vaddr);
837  		return -ENOMEM;
838  	}
839  	for (i = 0; i < max_nr_gframes; i++)
840  		pfn[i] = XEN_PFN_DOWN(addr) + i;
841  
842  	xen_auto_xlat_grant_frames.vaddr = vaddr;
843  	xen_auto_xlat_grant_frames.pfn = pfn;
844  	xen_auto_xlat_grant_frames.count = max_nr_gframes;
845  
846  	return 0;
847  }
848  EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
849  
850  void gnttab_free_auto_xlat_frames(void)
851  {
852  	if (!xen_auto_xlat_grant_frames.count)
853  		return;
854  	kfree(xen_auto_xlat_grant_frames.pfn);
855  	memunmap(xen_auto_xlat_grant_frames.vaddr);
856  
857  	xen_auto_xlat_grant_frames.pfn = NULL;
858  	xen_auto_xlat_grant_frames.count = 0;
859  	xen_auto_xlat_grant_frames.vaddr = NULL;
860  }
861  EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
862  
863  int gnttab_pages_set_private(int nr_pages, struct page **pages)
864  {
865  	int i;
866  
867  	for (i = 0; i < nr_pages; i++) {
868  #if BITS_PER_LONG < 64
869  		struct xen_page_foreign *foreign;
870  
871  		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
872  		if (!foreign)
873  			return -ENOMEM;
874  
875  		set_page_private(pages[i], (unsigned long)foreign);
876  #endif
877  		SetPagePrivate(pages[i]);
878  	}
879  
880  	return 0;
881  }
882  EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
883  
884  /**
885   * gnttab_alloc_pages - allocate pages suitable for mapping grants into
886   * @nr_pages: number of pages to alloc
887   * @pages: returns the pages
888   */
889  int gnttab_alloc_pages(int nr_pages, struct page **pages)
890  {
891  	int ret;
892  
893  	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
894  	if (ret < 0)
895  		return ret;
896  
897  	ret = gnttab_pages_set_private(nr_pages, pages);
898  	if (ret < 0)
899  		gnttab_free_pages(nr_pages, pages);
900  
901  	return ret;
902  }
903  EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
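/*
 * Hedged sketch of how a backend might use the helpers above to obtain pages
 * it can map foreign grants into (array size and error handling are
 * illustrative):
 *
 *	struct page *pages[8];
 *
 *	if (gnttab_alloc_pages(8, pages))
 *		return -ENOMEM;
 *	// fill gnttab_map_grant_ref ops targeting these pages and call
 *	// gnttab_map_refs(); unmap again before releasing them.
 *	gnttab_free_pages(8, pages);
 */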
904  
905  #ifdef CONFIG_XEN_UNPOPULATED_ALLOC
906  static inline void cache_init(struct gnttab_page_cache *cache)
907  {
908  	cache->pages = NULL;
909  }
910  
911  static inline bool cache_empty(struct gnttab_page_cache *cache)
912  {
913  	return !cache->pages;
914  }
915  
916  static inline struct page *cache_deq(struct gnttab_page_cache *cache)
917  {
918  	struct page *page;
919  
920  	page = cache->pages;
921  	cache->pages = page->zone_device_data;
922  
923  	return page;
924  }
925  
926  static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
927  {
928  	page->zone_device_data = cache->pages;
929  	cache->pages = page;
930  }
931  #else
932  static inline void cache_init(struct gnttab_page_cache *cache)
933  {
934  	INIT_LIST_HEAD(&cache->pages);
935  }
936  
937  static inline bool cache_empty(struct gnttab_page_cache *cache)
938  {
939  	return list_empty(&cache->pages);
940  }
941  
942  static inline struct page *cache_deq(struct gnttab_page_cache *cache)
943  {
944  	struct page *page;
945  
946  	page = list_first_entry(&cache->pages, struct page, lru);
947  	list_del(&page->lru);
948  
949  	return page;
950  }
951  
952  static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
953  {
954  	list_add(&page->lru, &cache->pages);
955  }
956  #endif
957  
958  void gnttab_page_cache_init(struct gnttab_page_cache *cache)
959  {
960  	spin_lock_init(&cache->lock);
961  	cache_init(cache);
962  	cache->num_pages = 0;
963  }
964  EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
965  
966  int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
967  {
968  	unsigned long flags;
969  
970  	spin_lock_irqsave(&cache->lock, flags);
971  
972  	if (cache_empty(cache)) {
973  		spin_unlock_irqrestore(&cache->lock, flags);
974  		return gnttab_alloc_pages(1, page);
975  	}
976  
977  	page[0] = cache_deq(cache);
978  	cache->num_pages--;
979  
980  	spin_unlock_irqrestore(&cache->lock, flags);
981  
982  	return 0;
983  }
984  EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
985  
986  void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
987  			   unsigned int num)
988  {
989  	unsigned long flags;
990  	unsigned int i;
991  
992  	spin_lock_irqsave(&cache->lock, flags);
993  
994  	for (i = 0; i < num; i++)
995  		cache_enq(cache, page[i]);
996  	cache->num_pages += num;
997  
998  	spin_unlock_irqrestore(&cache->lock, flags);
999  }
1000  EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
1001  
1002  void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
1003  {
1004  	struct page *page[10];
1005  	unsigned int i = 0;
1006  	unsigned long flags;
1007  
1008  	spin_lock_irqsave(&cache->lock, flags);
1009  
1010  	while (cache->num_pages > num) {
1011  		page[i] = cache_deq(cache);
1012  		cache->num_pages--;
1013  		if (++i == ARRAY_SIZE(page)) {
1014  			spin_unlock_irqrestore(&cache->lock, flags);
1015  			gnttab_free_pages(i, page);
1016  			i = 0;
1017  			spin_lock_irqsave(&cache->lock, flags);
1018  		}
1019  	}
1020  
1021  	spin_unlock_irqrestore(&cache->lock, flags);
1022  
1023  	if (i != 0)
1024  		gnttab_free_pages(i, page);
1025  }
1026  EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
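/*
 * Sketch of the page-cache pattern (a cache embedded in a driver-private
 * structure is assumed; names are illustrative). It lets backends recycle
 * grant-mappable pages instead of hitting the page allocator each time:
 *
 *	gnttab_page_cache_init(&priv->cache);
 *	...
 *	struct page *page;
 *
 *	if (gnttab_page_cache_get(&priv->cache, &page))
 *		return -ENOMEM;
 *	// use the page for a grant mapping, then return it:
 *	gnttab_page_cache_put(&priv->cache, &page, 1);
 *	...
 *	gnttab_page_cache_shrink(&priv->cache, 16);	// keep at most 16 pages
 */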
1027  
1028  void gnttab_pages_clear_private(int nr_pages, struct page **pages)
1029  {
1030  	int i;
1031  
1032  	for (i = 0; i < nr_pages; i++) {
1033  		if (PagePrivate(pages[i])) {
1034  #if BITS_PER_LONG < 64
1035  			kfree((void *)page_private(pages[i]));
1036  #endif
1037  			ClearPagePrivate(pages[i]);
1038  		}
1039  	}
1040  }
1041  EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
1042  
1043  /**
1044   * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
1045   * @nr_pages: number of pages to free
1046   * @pages: the pages
1047   */
1048  void gnttab_free_pages(int nr_pages, struct page **pages)
1049  {
1050  	gnttab_pages_clear_private(nr_pages, pages);
1051  	xen_free_unpopulated_pages(nr_pages, pages);
1052  }
1053  EXPORT_SYMBOL_GPL(gnttab_free_pages);
1054  
1055  #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
1056  /**
1057   * gnttab_dma_alloc_pages - allocate DMA-able pages suitable for mapping grants into
1058   * @args: arguments to the function
1059   */
1060  int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
1061  {
1062  	unsigned long pfn, start_pfn;
1063  	size_t size;
1064  	int i, ret;
1065  
1066  	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
1067  		return -ENOMEM;
1068  
1069  	size = args->nr_pages << PAGE_SHIFT;
1070  	if (args->coherent)
1071  		args->vaddr = dma_alloc_coherent(args->dev, size,
1072  						 &args->dev_bus_addr,
1073  						 GFP_KERNEL | __GFP_NOWARN);
1074  	else
1075  		args->vaddr = dma_alloc_wc(args->dev, size,
1076  					   &args->dev_bus_addr,
1077  					   GFP_KERNEL | __GFP_NOWARN);
1078  	if (!args->vaddr) {
1079  		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
1080  		return -ENOMEM;
1081  	}
1082  
1083  	start_pfn = __phys_to_pfn(args->dev_bus_addr);
1084  	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
1085  			pfn++, i++) {
1086  		struct page *page = pfn_to_page(pfn);
1087  
1088  		args->pages[i] = page;
1089  		args->frames[i] = xen_page_to_gfn(page);
1090  		xenmem_reservation_scrub_page(page);
1091  	}
1092  
1093  	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
1094  
1095  	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
1096  	if (ret != args->nr_pages) {
1097  		pr_debug("Failed to decrease reservation for DMA buffer\n");
1098  		ret = -EFAULT;
1099  		goto fail;
1100  	}
1101  
1102  	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
1103  	if (ret < 0)
1104  		goto fail;
1105  
1106  	return 0;
1107  
1108  fail:
1109  	gnttab_dma_free_pages(args);
1110  	return ret;
1111  }
1112  EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
1113  
1114  /**
1115   * gnttab_dma_free_pages - free DMAable pages
1116   * @args: arguments to the function
1117   */
1118  int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
1119  {
1120  	size_t size;
1121  	int i, ret;
1122  
1123  	gnttab_pages_clear_private(args->nr_pages, args->pages);
1124  
1125  	for (i = 0; i < args->nr_pages; i++)
1126  		args->frames[i] = page_to_xen_pfn(args->pages[i]);
1127  
1128  	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
1129  	if (ret != args->nr_pages) {
1130  		pr_debug("Failed to increase reservation for DMA buffer\n");
1131  		ret = -EFAULT;
1132  	} else {
1133  		ret = 0;
1134  	}
1135  
1136  	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
1137  					     args->frames);
1138  
1139  	size = args->nr_pages << PAGE_SHIFT;
1140  	if (args->coherent)
1141  		dma_free_coherent(args->dev, size,
1142  				  args->vaddr, args->dev_bus_addr);
1143  	else
1144  		dma_free_wc(args->dev, size,
1145  			    args->vaddr, args->dev_bus_addr);
1146  	return ret;
1147  }
1148  EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
1149  #endif
1150  
1151  /* Handling of paged out grant targets (GNTST_eagain) */
1152  #define MAX_DELAY 256
1153  static inline void
1154  gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
1155  						const char *func)
1156  {
1157  	unsigned delay = 1;
1158  
1159  	do {
1160  		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
1161  		if (*status == GNTST_eagain)
1162  			msleep(delay++);
1163  	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
1164  
1165  	if (delay >= MAX_DELAY) {
1166  		pr_err("%s: %s eagain grant\n", func, current->comm);
1167  		*status = GNTST_bad_page;
1168  	}
1169  }
1170  
1171  void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
1172  {
1173  	struct gnttab_map_grant_ref *op;
1174  
1175  	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
1176  		BUG();
1177  	for (op = batch; op < batch + count; op++)
1178  		if (op->status == GNTST_eagain)
1179  			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
1180  						&op->status, __func__);
1181  }
1182  EXPORT_SYMBOL_GPL(gnttab_batch_map);
1183  
1184  void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
1185  {
1186  	struct gnttab_copy *op;
1187  
1188  	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
1189  		BUG();
1190  	for (op = batch; op < batch + count; op++)
1191  		if (op->status == GNTST_eagain)
1192  			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
1193  						&op->status, __func__);
1194  }
1195  EXPORT_SYMBOL_GPL(gnttab_batch_copy);
1196  
1197  void gnttab_foreach_grant_in_range(struct page *page,
1198  				   unsigned int offset,
1199  				   unsigned int len,
1200  				   xen_grant_fn_t fn,
1201  				   void *data)
1202  {
1203  	unsigned int goffset;
1204  	unsigned int glen;
1205  	unsigned long xen_pfn;
1206  
1207  	len = min_t(unsigned int, PAGE_SIZE - offset, len);
1208  	goffset = xen_offset_in_page(offset);
1209  
1210  	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
1211  
1212  	while (len) {
1213  		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
1214  		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1215  
1216  		goffset = 0;
1217  		xen_pfn++;
1218  		len -= glen;
1219  	}
1220  }
1221  EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
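/*
 * Illustrative callback splitting a buffer that may span several Xen pages
 * into grant-sized segments; my_seg_fn and my_segs are hypothetical names:
 *
 *	static void my_seg_fn(unsigned long gfn, unsigned int offset,
 *			      unsigned int len, void *data)
 *	{
 *		// record one segment of at most XEN_PAGE_SIZE bytes at
 *		// (gfn, offset, len), e.g. in a ring request
 *	}
 *	...
 *	gnttab_foreach_grant_in_range(page, offset, len, my_seg_fn, my_segs);
 */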
1222  
1223  void gnttab_foreach_grant(struct page **pages,
1224  			  unsigned int nr_grefs,
1225  			  xen_grant_fn_t fn,
1226  			  void *data)
1227  {
1228  	unsigned int goffset = 0;
1229  	unsigned long xen_pfn = 0;
1230  	unsigned int i;
1231  
1232  	for (i = 0; i < nr_grefs; i++) {
1233  		if ((i % XEN_PFN_PER_PAGE) == 0) {
1234  			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1235  			goffset = 0;
1236  		}
1237  
1238  		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1239  
1240  		goffset += XEN_PAGE_SIZE;
1241  		xen_pfn++;
1242  	}
1243  }
1244  
1245  int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1246  		    struct gnttab_map_grant_ref *kmap_ops,
1247  		    struct page **pages, unsigned int count)
1248  {
1249  	int i, ret;
1250  
1251  	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
1252  	if (ret)
1253  		return ret;
1254  
1255  	for (i = 0; i < count; i++) {
1256  		switch (map_ops[i].status) {
1257  		case GNTST_okay:
1258  		{
1259  			struct xen_page_foreign *foreign;
1260  
1261  			SetPageForeign(pages[i]);
1262  			foreign = xen_page_foreign(pages[i]);
1263  			foreign->domid = map_ops[i].dom;
1264  			foreign->gref = map_ops[i].ref;
1265  			break;
1266  		}
1267  
1268  		case GNTST_no_device_space:
1269  			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1270  			break;
1271  
1272  		case GNTST_eagain:
1273  			/* Retry eagain maps */
1274  			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1275  						map_ops + i,
1276  						&map_ops[i].status, __func__);
1277  			/* Test status in next loop iteration. */
1278  			i--;
1279  			break;
1280  
1281  		default:
1282  			break;
1283  		}
1284  	}
1285  
1286  	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
1287  }
1288  EXPORT_SYMBOL_GPL(gnttab_map_refs);
1289  
1290  int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1291  		      struct gnttab_unmap_grant_ref *kunmap_ops,
1292  		      struct page **pages, unsigned int count)
1293  {
1294  	unsigned int i;
1295  	int ret;
1296  
1297  	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1298  	if (ret)
1299  		return ret;
1300  
1301  	for (i = 0; i < count; i++)
1302  		ClearPageForeign(pages[i]);
1303  
1304  	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
1305  }
1306  EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
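/*
 * Hedged sketch of a single-page map/unmap round trip using the helpers
 * above together with gnttab_set_map_op()/gnttab_set_unmap_op() from
 * <xen/grant_table.h>; addr, ref and domid are assumed to come from the
 * caller, and error handling is omitted:
 *
 *	struct gnttab_map_grant_ref map;
 *	struct gnttab_unmap_grant_ref unmap;
 *	struct page *page;
 *
 *	gnttab_alloc_pages(1, &page);
 *	gnttab_set_map_op(&map, addr, GNTMAP_host_map, ref, domid);
 *	gnttab_map_refs(&map, NULL, &page, 1);
 *	...
 *	gnttab_set_unmap_op(&unmap, addr, GNTMAP_host_map, map.handle);
 *	gnttab_unmap_refs(&unmap, NULL, &page, 1);
 *	gnttab_free_pages(1, &page);
 */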
1307  
1308  #define GNTTAB_UNMAP_REFS_DELAY 5
1309  
1310  static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
1311  
1312  static void gnttab_unmap_work(struct work_struct *work)
1313  {
1314  	struct gntab_unmap_queue_data
1315  		*unmap_data = container_of(work,
1316  					   struct gntab_unmap_queue_data,
1317  					   gnttab_work.work);
1318  	if (unmap_data->age != UINT_MAX)
1319  		unmap_data->age++;
1320  	__gnttab_unmap_refs_async(unmap_data);
1321  }
1322  
1323  static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
1324  {
1325  	int ret;
1326  	int pc;
1327  
1328  	for (pc = 0; pc < item->count; pc++) {
1329  		if (page_count(item->pages[pc]) > 1) {
1330  			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
1331  			schedule_delayed_work(&item->gnttab_work,
1332  					      msecs_to_jiffies(delay));
1333  			return;
1334  		}
1335  	}
1336  
1337  	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
1338  				item->pages, item->count);
1339  	item->done(ret, item);
1340  }
1341  
1342  void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
1343  {
1344  	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
1345  	item->age = 0;
1346  
1347  	__gnttab_unmap_refs_async(item);
1348  }
1349  EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1350  
1351  static void unmap_refs_callback(int result,
1352  		struct gntab_unmap_queue_data *data)
1353  {
1354  	struct unmap_refs_callback_data *d = data->data;
1355  
1356  	d->result = result;
1357  	complete(&d->completion);
1358  }
1359  
1360  int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
1361  {
1362  	struct unmap_refs_callback_data data;
1363  
1364  	init_completion(&data.completion);
1365  	item->data = &data;
1366  	item->done = &unmap_refs_callback;
1367  	gnttab_unmap_refs_async(item);
1368  	wait_for_completion(&data.completion);
1369  
1370  	return data.result;
1371  }
1372  EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
1373  
1374  static unsigned int nr_status_frames(unsigned int nr_grant_frames)
1375  {
1376  	return gnttab_frames(nr_grant_frames, SPP);
1377  }
1378  
1379  static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1380  {
1381  	int rc;
1382  
1383  	rc = arch_gnttab_map_shared(frames, nr_gframes,
1384  				    gnttab_max_grant_frames(),
1385  				    &gnttab_shared.addr);
1386  	BUG_ON(rc);
1387  
1388  	return 0;
1389  }
1390  
1391  static void gnttab_unmap_frames_v1(void)
1392  {
1393  	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1394  }
1395  
1396  static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
1397  {
1398  	uint64_t *sframes;
1399  	unsigned int nr_sframes;
1400  	struct gnttab_get_status_frames getframes;
1401  	int rc;
1402  
1403  	nr_sframes = nr_status_frames(nr_gframes);
1404  
1405  	/* No need for kzalloc as the array is initialized by the following
1406  	 * GNTTABOP_get_status_frames hypercall.
1407  	 */
1408  	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
1409  	if (!sframes)
1410  		return -ENOMEM;
1411  
1412  	getframes.dom        = DOMID_SELF;
1413  	getframes.nr_frames  = nr_sframes;
1414  	set_xen_guest_handle(getframes.frame_list, sframes);
1415  
1416  	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
1417  				       &getframes, 1);
1418  	if (rc == -ENOSYS) {
1419  		kfree(sframes);
1420  		return -ENOSYS;
1421  	}
1422  
1423  	BUG_ON(rc || getframes.status);
1424  
1425  	rc = arch_gnttab_map_status(sframes, nr_sframes,
1426  				    nr_status_frames(gnttab_max_grant_frames()),
1427  				    &grstatus);
1428  	BUG_ON(rc);
1429  	kfree(sframes);
1430  
1431  	rc = arch_gnttab_map_shared(frames, nr_gframes,
1432  				    gnttab_max_grant_frames(),
1433  				    &gnttab_shared.addr);
1434  	BUG_ON(rc);
1435  
1436  	return 0;
1437  }
1438  
1439  static void gnttab_unmap_frames_v2(void)
1440  {
1441  	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1442  	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
1443  }
1444  
1445  static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1446  {
1447  	struct gnttab_setup_table setup;
1448  	xen_pfn_t *frames;
1449  	unsigned int nr_gframes = end_idx + 1;
1450  	int rc;
1451  
1452  	if (xen_feature(XENFEAT_auto_translated_physmap)) {
1453  		struct xen_add_to_physmap xatp;
1454  		unsigned int i = end_idx;
1455  		rc = 0;
1456  		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
1457  		/*
1458  		 * Loop backwards, so that the first hypercall has the largest
1459  		 * index, ensuring that the table will grow only once.
1460  		 */
1461  		do {
1462  			xatp.domid = DOMID_SELF;
1463  			xatp.idx = i;
1464  			xatp.space = XENMAPSPACE_grant_table;
1465  			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
1466  			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1467  			if (rc != 0) {
1468  				pr_warn("grant table add_to_physmap failed, err=%d\n",
1469  					rc);
1470  				break;
1471  			}
1472  		} while (i-- > start_idx);
1473  
1474  		return rc;
1475  	}
1476  
1477  	/* No need for kzalloc as the array is initialized by the following
1478  	 * GNTTABOP_setup_table hypercall.
1479  	 */
1480  	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
1481  	if (!frames)
1482  		return -ENOMEM;
1483  
1484  	setup.dom        = DOMID_SELF;
1485  	setup.nr_frames  = nr_gframes;
1486  	set_xen_guest_handle(setup.frame_list, frames);
1487  
1488  	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1489  	if (rc == -ENOSYS) {
1490  		kfree(frames);
1491  		return -ENOSYS;
1492  	}
1493  
1494  	BUG_ON(rc || setup.status);
1495  
1496  	rc = gnttab_interface->map_frames(frames, nr_gframes);
1497  
1498  	kfree(frames);
1499  
1500  	return rc;
1501  }
1502  
1503  static const struct gnttab_ops gnttab_v1_ops = {
1504  	.version			= 1,
1505  	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1506  					  sizeof(struct grant_entry_v1),
1507  	.map_frames			= gnttab_map_frames_v1,
1508  	.unmap_frames			= gnttab_unmap_frames_v1,
1509  	.update_entry			= gnttab_update_entry_v1,
1510  	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
1511  	.read_frame			= gnttab_read_frame_v1,
1512  };
1513  
1514  static const struct gnttab_ops gnttab_v2_ops = {
1515  	.version			= 2,
1516  	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1517  					  sizeof(union grant_entry_v2),
1518  	.map_frames			= gnttab_map_frames_v2,
1519  	.unmap_frames			= gnttab_unmap_frames_v2,
1520  	.update_entry			= gnttab_update_entry_v2,
1521  	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
1522  	.read_frame			= gnttab_read_frame_v2,
1523  };
1524  
1525  static bool gnttab_need_v2(void)
1526  {
1527  #ifdef CONFIG_X86
1528  	uint32_t base, width;
1529  
1530  	if (xen_pv_domain()) {
1531  		base = xen_cpuid_base();
1532  		if (cpuid_eax(base) < 5)
1533  			return false;	/* Information not available, use V1. */
1534  		width = cpuid_ebx(base + 5) &
1535  			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
1536  		return width > 32 + PAGE_SHIFT;
1537  	}
1538  #endif
1539  	return !!(max_possible_pfn >> 32);
1540  }
1541  
1542  static void gnttab_request_version(void)
1543  {
1544  	long rc;
1545  	struct gnttab_set_version gsv;
1546  
1547  	if (gnttab_need_v2())
1548  		gsv.version = 2;
1549  	else
1550  		gsv.version = 1;
1551  
1552  	/* Boot parameter overrides automatic selection. */
1553  	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1554  		gsv.version = xen_gnttab_version;
1555  
1556  	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1557  	if (rc == 0 && gsv.version == 2)
1558  		gnttab_interface = &gnttab_v2_ops;
1559  	else
1560  		gnttab_interface = &gnttab_v1_ops;
1561  	pr_info("Grant tables using version %d layout\n",
1562  		gnttab_interface->version);
1563  }
1564  
1565  static int gnttab_setup(void)
1566  {
1567  	unsigned int max_nr_gframes;
1568  
1569  	max_nr_gframes = gnttab_max_grant_frames();
1570  	if (max_nr_gframes < nr_grant_frames)
1571  		return -ENOSYS;
1572  
1573  	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
1574  		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1575  		if (gnttab_shared.addr == NULL) {
1576  			pr_warn("gnttab share frames is not mapped!\n");
1577  			return -ENOMEM;
1578  		}
1579  	}
1580  	return gnttab_map(0, nr_grant_frames - 1);
1581  }
1582  
1583  int gnttab_resume(void)
1584  {
1585  	gnttab_request_version();
1586  	return gnttab_setup();
1587  }
1588  
1589  int gnttab_suspend(void)
1590  {
1591  	if (!xen_feature(XENFEAT_auto_translated_physmap))
1592  		gnttab_interface->unmap_frames();
1593  	return 0;
1594  }
1595  
1596  static int gnttab_expand(unsigned int req_entries)
1597  {
1598  	int rc;
1599  	unsigned int cur, extra;
1600  
1601  	cur = nr_grant_frames;
1602  	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1603  		 gnttab_interface->grefs_per_grant_frame);
1604  	if (cur + extra > gnttab_max_grant_frames()) {
1605  		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1606  				    " cur=%u extra=%u limit=%u"
1607  				    " gnttab_free_count=%u req_entries=%u\n",
1608  				    cur, extra, gnttab_max_grant_frames(),
1609  				    gnttab_free_count, req_entries);
1610  		return -ENOSPC;
1611  	}
1612  
1613  	rc = gnttab_map(cur, cur + extra - 1);
1614  	if (rc == 0)
1615  		rc = grow_gnttab_list(extra);
1616  
1617  	return rc;
1618  }
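/*
 * For example (v1 layout, 512 grants per frame): a request for 600 more
 * entries rounds up to extra == 2 frames, i.e. 1024 additional grant
 * entries, provided cur + 2 does not exceed gnttab_max_grant_frames().
 */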
1619  
1620  int gnttab_init(void)
1621  {
1622  	int i;
1623  	unsigned long max_nr_grant_frames, max_nr_grefs;
1624  	unsigned int max_nr_glist_frames, nr_glist_frames;
1625  	int ret;
1626  
1627  	gnttab_request_version();
1628  	max_nr_grant_frames = gnttab_max_grant_frames();
1629  	max_nr_grefs = max_nr_grant_frames *
1630  			gnttab_interface->grefs_per_grant_frame;
1631  	nr_grant_frames = 1;
1632  
1633  	/* Determine the maximum number of frames required for the
1634  	 * grant reference free list on the current hypervisor.
1635  	 */
1636  	max_nr_glist_frames = max_nr_grefs / RPP;
1637  
1638  	gnttab_list = kmalloc_array(max_nr_glist_frames,
1639  				    sizeof(grant_ref_t *),
1640  				    GFP_KERNEL);
1641  	if (gnttab_list == NULL)
1642  		return -ENOMEM;
1643  
1644  	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1645  	for (i = 0; i < nr_glist_frames; i++) {
1646  		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1647  		if (gnttab_list[i] == NULL) {
1648  			ret = -ENOMEM;
1649  			goto ini_nomem;
1650  		}
1651  	}
1652  
1653  	gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
1654  	if (!gnttab_free_bitmap) {
1655  		ret = -ENOMEM;
1656  		goto ini_nomem;
1657  	}
1658  
1659  	ret = arch_gnttab_init(max_nr_grant_frames,
1660  			       nr_status_frames(max_nr_grant_frames));
1661  	if (ret < 0)
1662  		goto ini_nomem;
1663  
1664  	if (gnttab_setup() < 0) {
1665  		ret = -ENODEV;
1666  		goto ini_nomem;
1667  	}
1668  
1669  	gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;
1670  
1671  	gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
1672  			gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);
1673  
1674  	printk("Grant table initialized\n");
1675  	return 0;
1676  
1677   ini_nomem:
1678  	for (i--; i >= 0; i--)
1679  		free_page((unsigned long)gnttab_list[i]);
1680  	kfree(gnttab_list);
1681  	bitmap_free(gnttab_free_bitmap);
1682  	return ret;
1683  }
1684  EXPORT_SYMBOL_GPL(gnttab_init);
1685  
1686  static int __gnttab_init(void)
1687  {
1688  	if (!xen_domain())
1689  		return -ENODEV;
1690  
1691  	/* Delay grant-table initialization in the PV on HVM case */
1692  	if (xen_hvm_domain() && !xen_pvh_domain())
1693  		return 0;
1694  
1695  	return gnttab_init();
1696  }
1697  /* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
1698   * beforehand to initialize xen_auto_xlat_grant_frames. */
1699  core_initcall_sync(__gnttab_init);
1700