/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>

#include "kfd_priv.h"
#include "kfd_dbgmgr.h"

/*
 * Initial size for the array of queues.
 * The allocated size is doubled each time
 * it is exceeded, up to MAX_PROCESS_QUEUES.
 */
#define INITIAL_QUEUE_ARRAY_SIZE 16

/*
 * List of struct kfd_process (field kfd_processes).
 * Unique/indexed by mm_struct*.
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);
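
/*
 * Readers walk kfd_processes_table under this SRCU; the removal path
 * does hash_del_rcu() and then synchronize_srcu() before the process
 * can be freed.
 */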
DEFINE_STATIC_SRCU(kfd_processes_srcu);

static struct workqueue_struct *kfd_process_wq;
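
/*
 * Deferred-release context: kfd_work must stay the first member so the
 * release handler can cast the work_struct back to this wrapper.
 */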
struct kfd_process_release_work {
	struct work_struct kfd_work;
	struct kfd_process *p;
};

static struct kfd_process *find_process(const struct task_struct *thread);
static struct kfd_process *create_process(const struct task_struct *thread);

void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}
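
/*
 * Create a kfd_process for the calling task, or return the existing
 * one. kfd_processes_mutex keeps two threads of the same process from
 * each creating a kfd_process structure.
 */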
struct kfd_process *kfd_create_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	BUG_ON(!kfd_process_wq);

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the kfd processes mutex before starting process creation
	 * so there is no window in which two threads of the same process
	 * each create a kfd_process structure.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("kfd: process already found\n");
	else
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	return process;
}
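
/*
 * Look up the kfd_process for an existing task. Unlike
 * kfd_create_process, this never creates one and fails with -EINVAL
 * if none is registered.
 */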
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}
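
/* Hash-table walk; caller must hold the kfd_processes_srcu read lock. */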
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
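
/*
 * Workqueue handler that tears down a dead process: resets in-flight
 * wavefronts if needed, unbinds every device pasid and frees all
 * per-process allocations.
 */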
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process_release_work *my_work;
	struct kfd_process_device *pdd, *temp;
	struct kfd_process *p;

	my_work = (struct kfd_process_release_work *) work;
	p = my_work->p;

	pr_debug("Releasing process (pasid %d) in workqueue\n",
			p->pasid);

	mutex_lock(&p->mutex);

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
							per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
				pdd->dev->id, p->pasid);

		if (pdd->reset_wavefronts)
			dbgdev_wave_reset_wavefronts(pdd->dev, p);

		amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
		list_del(&pdd->per_device_list);
		kfree(pdd);
	}

	kfd_event_free_process(p);
	kfd_pasid_free(p->pasid);

	mutex_unlock(&p->mutex);
	mutex_destroy(&p->mutex);

	kfree(p->queues);
	kfree(p);
	kfree(work);
}
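
/*
 * RCU callback scheduled from the mmu_notifier release path; drops the
 * mm reference taken there and hands the real teardown to the
 * workqueue.
 */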
static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process_release_work *work;
	struct kfd_process *p;

	BUG_ON(!kfd_process_wq);

	p = container_of(rcu, struct kfd_process, rcu);
	BUG_ON(atomic_read(&p->mm->mm_count) <= 0);

	mmdrop(p->mm);

	work = kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
	if (work) {
		INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
		work->p = p;
		queue_work(kfd_process_wq, (struct work_struct *) work);
	}
}
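
/*
 * mmu_notifier release hook: the process address space is going away,
 * so unhash the process, stop debug and wavefront activity, and
 * schedule delayed destruction once all SRCU readers are done.
 */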
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed while the
	 * mmu_notifier SRCU is read-locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	BUG_ON(p->mm != mm);

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	/* In case our notifier is called before the IOMMU notifier */
	pqm_uninit(&p->pqm);

	/*
	 * Iterate over all process device data structures and check
	 * whether we should delete debug managers and reset all wavefronts.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if ((pdd->dev->dbgmgr) &&
				(pdd->dev->dbgmgr->pasid == p->pasid))
			kfd_dbgmgr_destroy(pdd->dev->dbgmgr);

		if (pdd->reset_wavefronts) {
			pr_warn("amdkfd: Resetting all wave fronts\n");
			dbgdev_wave_reset_wavefronts(pdd->dev, p);
			pdd->reset_wavefronts = false;
		}
	}

	mutex_unlock(&p->mutex);

	/*
	 * Because we drop mm_count inside kfd_process_destroy_delayed
	 * and because mmu_notifier_unregister also drops mm_count, we
	 * need to take an extra count here.
	 */
	atomic_inc(&p->mm->mm_count);
	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};
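
/*
 * Allocate and initialize a new kfd_process: pasid, queue array, event
 * state, process queue manager and apertures, then publish it in the
 * hash table. Caller holds kfd_processes_mutex and mm->mmap_sem.
 */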
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
					sizeof(process->queues[0]), GFP_KERNEL);
	if (!process->queues)
		goto err_alloc_queues;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;

	process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	return process;

err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process->queues);
err_alloc_queues:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}
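
/* Return the per-device data for dev, or NULL if none has been created. */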
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}
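
/*
 * Allocate per-device data for this process/device pair and link it
 * into the process's per_device_data list. Returns NULL on allocation
 * failure.
 */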
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (pdd != NULL) {
		pdd->dev = dev;
		INIT_LIST_HEAD(&pdd->qpd.queues_list);
		INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
		pdd->qpd.dqm = dev->dqm;
		pdd->reset_wavefronts = false;
		list_add(&pdd->per_device_list, &p->per_device_data);
	}

	return pdd;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound)
		return pdd;

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = true;

	return pdd;
}
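
/*
 * Called when the IOMMU drops the pasid binding: stop debugging, reset
 * wavefronts if needed, and mark the per-device data unbound.
 */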
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	BUG_ON(dev == NULL);

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process %d from IOMMU\n", pasid);

	if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
		kfd_dbgmgr_destroy(dev->dbgmgr);

	pqm_uninit(&p->pqm);

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		mutex_unlock(&p->mutex);
		return;
	}

	if (pdd->reset_wavefronts) {
		dbgdev_wave_reset_wavefronts(pdd->dev, p);
		pdd->reset_wavefronts = false;
	}

	/*
	 * Just mark pdd as unbound: we still need it to call
	 * amd_iommu_unbind_pasid() when the process exits, and we must
	 * not call it here because the IOMMU is what called us.
	 */
	pdd->bound = false;

	mutex_unlock(&p->mutex);
}
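
/* Iterators over a process's per-device data list. */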
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !list_empty(&p->per_device_data);
}

/* This returns with process->mutex locked; returns NULL if no process matches. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			mutex_lock(&p->mutex);
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}