// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2014-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Treat context OTables as resources to make use of the resource
 * backing MOB eviction mechanism, which is used to read back the COTable
 * whenever the backing MOB is evicted.
 */

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"

#include <drm/ttm/ttm_placement.h>

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Highest entry id seen in the command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	size_t size_read_back;
	int seen_entries;
	u32 type;
	bool scrubbed;
	struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Minimum number of initial entries at cotable
 * allocation for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: Unbind call-back function.
 */
struct vmw_cotable_info {
	u32 min_initial_entries;
	u32 size;
	void (*unbind_func)(struct vmw_private *, struct list_head *,
			    bool);
};


/*
 * Getting the initial size right is difficult because it all depends
 * on what userspace is doing. The sizes are rounded up to PAGE_SIZE,
 * so we just want to make sure that, for the majority of apps, the
 * initial number of entries doesn't require an immediate resize.
 * For all cotables except SVGACOTableDXElementLayoutEntry and
 * SVGACOTableDXBlendStateEntry the initial number of entries fits
 * within one PAGE_SIZE. For SVGACOTableDXElementLayoutEntry and
 * SVGACOTableDXBlendStateEntry we want to reserve two pages,
 * because that's what all apps will require initially.
 */
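/*
 * Note on the "+ 1" terms below: a single page holds
 * PAGE_SIZE / sizeof(entry) entries, so asking for one entry more than
 * that pushes the initial allocation in vmw_cotable_alloc() past one
 * page, which PFN_ALIGN() then rounds up to the two pages requested
 * above (the entry sizes are well below PAGE_SIZE).
 */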
static const struct vmw_cotable_info co_info[] = {
	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
	{PAGE_SIZE/sizeof(SVGACOTableDXElementLayoutEntry) + 1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
	{PAGE_SIZE/sizeof(SVGACOTableDXBlendStateEntry) + 1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
	{1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub},
	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
	{1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
};

/*
 * Cotables with bindings that we remove must be scrubbed first,
 * otherwise the device will swap in an invalid context when bindings
 * are removed before the cotable is scrubbed.
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
	SVGA_COTABLE_RTVIEW,
	SVGA_COTABLE_DSVIEW,
	SVGA_COTABLE_SRVIEW,
	SVGA_COTABLE_DXSHADER,
	SVGA_COTABLE_ELEMENTLAYOUT,
	SVGA_COTABLE_BLENDSTATE,
	SVGA_COTABLE_DEPTHSTENCIL,
	SVGA_COTABLE_RASTERIZERSTATE,
	SVGA_COTABLE_SAMPLER,
	SVGA_COTABLE_STREAMOUTPUT,
	SVGA_COTABLE_DXQUERY,
	SVGA_COTABLE_UAVIEW,
};

static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
	.res_type = vmw_res_cotable,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "context guest backed object tables",
	.domain = VMW_BO_DOMAIN_MOB,
	.busy_domain = VMW_BO_DOMAIN_MOB,
	.create = vmw_cotable_create,
	.destroy = vmw_cotable_destroy,
	.bind = vmw_cotable_bind,
	.unbind = vmw_cotable_unbind,
};
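/*
 * With these callbacks the generic resource code drives a cotable like
 * any other MOB-backed resource: create() and bind() run during
 * validation, while unbind() runs when the backing MOB is evicted,
 * optionally reading the cotable contents back first.
 */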

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
	return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
	res->id = -1;
	return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is identical to bind() except the function interface looks different.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd;

	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
	WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = vcotbl->ctx->id;
	cmd->body.type = vcotbl->type;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validSizeInBytes = vcotbl->size_read_back;

	vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
	vcotbl->scrubbed = false;

	return 0;
}

/**
 * vmw_cotable_bind - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf)
{
	/*
	 * The create() callback may have changed @res->guest_memory_bo
	 * without the caller noticing, and with val_buf->bo still pointing
	 * to the old backing buffer. Although hackish, and not used
	 * currently, take the opportunity to correct the value here so
	 * that it's not misused in the future.
	 */
	val_buf->bo = &res->guest_memory_bo->tbo;

	return vmw_cotable_unscrub(res);
}

/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the
 * backup buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with the
 * following restrictions.
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	size_t submit_size;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd0;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd1;

	if (vcotbl->scrubbed)
		return 0;

	if (co_info[vcotbl->type].unbind_func)
		co_info[vcotbl->type].unbind_func(dev_priv,
						  &vcotbl->resource_list,
						  readback);
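
	/*
	 * Reserve space for the SET_COTABLE command and, if a readback was
	 * requested, a READBACK_COTABLE command placed immediately before
	 * it in the same submission.
	 */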
	submit_size = sizeof(*cmd1);
	if (readback)
		submit_size += sizeof(*cmd0);

	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (!cmd1)
		return -ENOMEM;

	vcotbl->size_read_back = 0;
	if (readback) {
		cmd0 = (void *) cmd1;
		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd0->header.size = sizeof(cmd0->body);
		cmd0->body.cid = vcotbl->ctx->id;
		cmd0->body.type = vcotbl->type;
		cmd1 = (void *) &cmd0[1];
		vcotbl->size_read_back = res->guest_memory_size;
	}
	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.cid = vcotbl->ctx->id;
	cmd1->body.type = vcotbl->type;
	cmd1->body.mobid = SVGA3D_INVALID_ID;
	cmd1->body.validSizeInBytes = 0;
	vmw_cmd_commit_flush(dev_priv, submit_size);
	vcotbl->scrubbed = true;

	/* Trigger a create() on next validate. */
	res->id = -1;

	return 0;
}

/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	if (!vmw_resource_mob_attached(res))
		return 0;

	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	mutex_lock(&dev_priv->binding_mutex);
	if (!vcotbl->scrubbed)
		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
	mutex_unlock(&dev_priv->binding_mutex);
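
	/*
	 * Fence the backing buffer so that any subsequent move or CPU
	 * access waits for the scrub / readback above to complete on the
	 * device.
	 */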
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_readback - Read back a cotable without unbinding.
 *
 * @res: The cotable resource.
 *
 * Reads back a cotable to its backing mob without scrubbing the MOB from
 * the cotable. The MOB is fenced for subsequent CPU access.
 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd;
	struct vmw_fence_obj *fence;

	if (!vcotbl->scrubbed) {
		cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;

		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.cid = vcotbl->ctx->id;
		cmd->body.type = vcotbl->type;
		vcotbl->size_read_back = res->guest_memory_size;
		vmw_cmd_commit(dev_priv, sizeof(*cmd));
	}

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(&res->guest_memory_bo->tbo, fence);
	vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function may not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_bo *buf, *old_buf = res->guest_memory_bo;
	struct ttm_buffer_object *bo, *old_bo = &res->guest_memory_bo->tbo;
	size_t old_size = res->guest_memory_size;
	size_t old_size_read_back = vcotbl->size_read_back;
	size_t cur_size_read_back;
	struct ttm_bo_kmap_obj old_map, new_map;
	int ret;
	size_t i;
	struct vmw_bo_params bo_params = {
		.domain = VMW_BO_DOMAIN_MOB,
		.busy_domain = VMW_BO_DOMAIN_MOB,
		.bo_type = ttm_bo_type_device,
		.size = new_size,
		.pin = true
	};

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);

	ret = vmw_cotable_readback(res);
	if (ret)
		goto out_done;

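	/*
	 * The readback above set size_read_back to the current size. Stash
	 * that value and temporarily restore the old one so the cotable
	 * state stays consistent if the resize fails; the new value is
	 * applied once the MOB switch has succeeded.
	 */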
	cur_size_read_back = vcotbl->size_read_back;
	vcotbl->size_read_back = old_size_read_back;

	/*
	 * While the device is processing, allocate and reserve a buffer
	 * object for the new COTable. Initially pin the buffer object to
	 * make sure we can use tryreserve without failure.
	 */
	ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		goto out_done;
	}

	bo = &buf->tbo;
	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

	ret = ttm_bo_wait(old_bo, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed waiting for cotable unbind.\n");
		goto out_wait;
	}

	/*
	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
	 * This should really be a TTM utility.
	 */
	for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
		bool dummy;

		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping old COTable on resize.\n");
			goto out_wait;
		}
		ret = ttm_bo_kmap(bo, i, 1, &new_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping new COTable on resize.\n");
			goto out_map_new;
		}
		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
		       ttm_kmap_obj_virtual(&old_map, &dummy),
		       PAGE_SIZE);
		ttm_bo_kunmap(&new_map);
		ttm_bo_kunmap(&old_map);
	}

	/* Unpin new buffer, and switch backup buffers. */
	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_MOB,
			     VMW_BO_DOMAIN_MOB);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed validating new COTable backup buffer.\n");
		goto out_wait;
	}

	vmw_resource_mob_detach(res);
	res->guest_memory_bo = buf;
	res->guest_memory_size = new_size;
	vcotbl->size_read_back = cur_size_read_back;

	/*
	 * Now tell the device to switch. If this fails, then we need to
	 * revert the full resize.
	 */
	ret = vmw_cotable_unscrub(res);
	if (ret) {
		DRM_ERROR("Failed switching COTable backup buffer.\n");
		res->guest_memory_bo = old_buf;
		res->guest_memory_size = old_size;
		vcotbl->size_read_back = old_size_read_back;
		vmw_resource_mob_attach(res);
		goto out_wait;
	}

	vmw_resource_mob_attach(res);
	/* Let go of the old mob. */
	vmw_user_bo_unref(&old_buf);
	res->id = vcotbl->type;

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		goto out_wait;

	/* Release the pin acquired at buffer creation (bo_params.pin). */
	ttm_bo_unpin(bo);

	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);

	return 0;

out_map_new:
	ttm_bo_kunmap(&old_map);
out_wait:
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);
	vmw_user_bo_unref(&buf);

out_done:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);

	return ret;
}

/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for
 * two things.
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	size_t new_size = res->guest_memory_size;
	size_t needed_size;
	int ret;

	/* Check whether we need to resize the cotable */
	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
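	/* Double the size until the highest seen entry fits. */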
	while (needed_size > new_size)
		new_size *= 2;

	if (likely(new_size <= res->guest_memory_size)) {
		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
		}
		res->id = vcotbl->type;
		return 0;
	}

	return vmw_cotable_resize(res, new_size);
}

/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final destroy callback, called as part of resource destruction.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
	(void) vmw_cotable_destroy(res);
}


/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
	kfree(res);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource; the cotable resource does not
 * take a refcount on it.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
				       struct vmw_resource *ctx,
				       u32 type)
{
	struct vmw_cotable *vcotbl;
	int ret;
	u32 num_entries;

	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
	if (unlikely(!vcotbl)) {
		ret = -ENOMEM;
		goto out_no_alloc;
	}

	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
				vmw_cotable_free, &vmw_cotable_func);
	if (unlikely(ret != 0))
		goto out_no_init;

	INIT_LIST_HEAD(&vcotbl->resource_list);
	vcotbl->res.id = type;
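	/*
	 * Start with a single page of backing store, but honor the
	 * per-type minimum entry count from co_info[], rounding the
	 * result up to a whole number of pages.
	 */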
	vcotbl->res.guest_memory_size = PAGE_SIZE;
	num_entries = PAGE_SIZE / co_info[type].size;
	if (num_entries < co_info[type].min_initial_entries) {
		vcotbl->res.guest_memory_size = co_info[type].min_initial_entries *
			co_info[type].size;
		vcotbl->res.guest_memory_size = PFN_ALIGN(vcotbl->res.guest_memory_size);
	}

	vcotbl->scrubbed = true;
	vcotbl->seen_entries = -1;
	vcotbl->type = type;
	vcotbl->ctx = ctx;

	vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;

	return &vcotbl->res;

out_no_init:
	kfree(vcotbl);
out_no_alloc:
	return ERR_PTR(ret);
}

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);

	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
			  (unsigned) vcotbl->type, id);
		return -EINVAL;
	}

	if (vcotbl->seen_entries < id) {
		/* Trigger a call to create() on next validate */
		res->id = -1;
		vcotbl->seen_entries = id;
	}

	return 0;
}

/**
 * vmw_cotable_add_resource - add a view to the cotable's list of active views.
 *
 * @res: Pointer to the struct vmw_resource representing the cotable.
 * @head: Pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
	struct vmw_cotable *vcotbl =
		container_of(res, struct vmw_cotable, res);

	list_add_tail(head, &vcotbl->resource_list);
}