// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, NVIDIA Corporation.
 */
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/of_platform.h>
#include <linux/pid.h>
#include <linux/slab.h>
/*
 * Release callback for a context device. Intentionally empty: the
 * backing struct host1x_memory_context lives in the cdl->devs array
 * and is reclaimed in host1x_memory_context_list_free(), not here.
 */
static void host1x_memory_context_release(struct device *dev)
{
	/* context device is freed in host1x_memory_context_list_free() */
}
21 int host1x_memory_context_list_init(struct host1x *host1x)
23 struct host1x_memory_context_list *cdl = &host1x->context_list;
24 struct device_node *node = host1x->dev->of_node;
25 struct host1x_memory_context *ctx;
31 mutex_init(&cdl->lock);
33 err = of_property_count_u32_elems(node, "iommu-map");
38 cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
42 for (i = 0; i < cdl->len; i++) {
43 struct iommu_fwspec *fwspec;
49 device_initialize(&ctx->dev);
52 * Due to an issue with T194 NVENC, only 38 bits can be used.
53 * Anyway, 256GiB of IOVA ought to be enough for anyone.
55 ctx->dma_mask = DMA_BIT_MASK(38);
56 ctx->dev.dma_mask = &ctx->dma_mask;
57 ctx->dev.coherent_dma_mask = ctx->dma_mask;
58 dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
59 ctx->dev.bus = &host1x_context_device_bus_type;
60 ctx->dev.parent = host1x->dev;
61 ctx->dev.release = host1x_memory_context_release;
63 dma_set_max_seg_size(&ctx->dev, UINT_MAX);
65 err = device_add(&ctx->dev);
67 dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
68 put_device(&ctx->dev);
72 err = of_dma_configure_id(&ctx->dev, node, true, &i);
74 dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
76 device_unregister(&ctx->dev);
80 fwspec = dev_iommu_fwspec_get(&ctx->dev);
81 if (!fwspec || !device_iommu_mapped(&ctx->dev)) {
82 dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
83 device_unregister(&ctx->dev);
87 ctx->stream_id = fwspec->ids[0] & 0xffff;
94 device_unregister(&cdl->devs[i].dev);
103 void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
107 for (i = 0; i < cdl->len; i++)
108 device_unregister(&cdl->devs[i].dev);
114 struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
117 struct host1x_memory_context_list *cdl = &host1x->context_list;
118 struct host1x_memory_context *free = NULL;
122 return ERR_PTR(-EOPNOTSUPP);
124 mutex_lock(&cdl->lock);
126 for (i = 0; i < cdl->len; i++) {
127 struct host1x_memory_context *cd = &cdl->devs[i];
129 if (cd->owner == pid) {
130 refcount_inc(&cd->ref);
131 mutex_unlock(&cdl->lock);
133 } else if (!cd->owner && !free) {
139 mutex_unlock(&cdl->lock);
140 return ERR_PTR(-EBUSY);
143 refcount_set(&free->ref, 1);
144 free->owner = get_pid(pid);
146 mutex_unlock(&cdl->lock);
150 EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);
152 void host1x_memory_context_get(struct host1x_memory_context *cd)
154 refcount_inc(&cd->ref);
156 EXPORT_SYMBOL_GPL(host1x_memory_context_get);
158 void host1x_memory_context_put(struct host1x_memory_context *cd)
160 struct host1x_memory_context_list *cdl = &cd->host->context_list;
162 if (refcount_dec_and_mutex_lock(&cd->ref, &cdl->lock)) {
165 mutex_unlock(&cdl->lock);
168 EXPORT_SYMBOL_GPL(host1x_memory_context_put);