GNU Linux-libre 4.14.313-gnu1
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"

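/*
 * Advance the runlist write pointer (kept in dwords) by increment_bytes
 * and warn if the new position would run past the end of the runlist IB.
 */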
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

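/*
 * Build a PM4 type-3 packet header for the given opcode. Per the PM4
 * format, the count field holds the number of dwords following the
 * header minus one, i.e. the total packet size in dwords minus two.
 */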
static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32All = 0;
	header.opcode = opcode;
	header.count = packet_size / sizeof(uint32_t) - 2;
	header.type = PM4_TYPE_3;

	return header.u32All;
}

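/*
 * Compute the runlist IB allocation size: one MAP_PROCESS packet per
 * process plus one MAP_QUEUES packet per queue. The runlist is
 * oversubscribed when more than one process or more queues than HW queue
 * slots are present; in that case, reserve room for a chaining RUN_LIST
 * packet at the end.
 */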
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count;
	unsigned int map_queue_size;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;

	/* check if there is oversubscription */
	*over_subscription = false;
	if ((process_count > 1) || (queue_count > get_queues_num(pm->dqm))) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = sizeof(struct pm4_mes_map_queues);
	/* calculate run list ib allocation size */
	*rlib_size = process_count * sizeof(struct pm4_mes_map_process) +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += sizeof(struct pm4_mes_runlist);

	pr_debug("runlist ib size %d\n", *rlib_size);
}

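/*
 * Allocate the runlist IB from the device's GTT sub-allocator and return
 * its CPU pointer and GPU address along with the computed size. Fails if
 * a previous runlist IB is still outstanding.
 */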
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		return retval;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;
	return retval;
}

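/*
 * Write a RUN_LIST packet into buffer, pointing the scheduler at the
 * indirect buffer at GPU address ib. With chain set, execution continues
 * into the referenced runlist instead of ending the current one.
 */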
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;

	if (WARN_ON(!ib))
		return -EFAULT;

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = build_pm4_header(IT_RUN_LIST,
						sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->ordinal2 = lower_32_bits(ib);
	packet->bitfields3.ib_base_hi = upper_32_bits(ib);

	return 0;
}

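/*
 * Write a MAP_PROCESS packet describing one process to the HW scheduler:
 * PASID, page table base, shared memory apertures, GDS/GWS/OAC resources
 * and the number of queues the process owns. For a debug process the
 * reported queue count is zero and the DIQ is enabled instead.
 */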
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
				struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process *packet;
	struct queue *cur;
	uint32_t num_queues;

	packet = (struct pm4_mes_map_process *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_map_process));

	packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields3.page_table_base = qpd->page_table_base;
	packet->bitfields10.gds_size = qpd->gds_size;
	packet->bitfields10.num_gws = qpd->num_gws;
	packet->bitfields10.num_oac = qpd->num_oac;
	num_queues = 0;
	list_for_each_entry(cur, &qpd->queues_list, list)
		num_queues++;
	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : num_queues;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

	packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	return 0;
}

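/*
 * Write a MAP_QUEUES packet for a single queue. The HW scheduler picks
 * the queue slot; the packet carries the doorbell offset, the MQD
 * address and the write pointer address. Queue and engine selection
 * depend on the queue type (compute, DIQ or SDMA).
 */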
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
		struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
						sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.alloc_format =
		alloc_format__mes_map_queues__one_per_pipe_vi;
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
			      queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__sdma0_vi;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		WARN(1, "queue type %d", q->properties.type);
		return -EINVAL;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

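/*
 * Build the complete runlist IB: for every process, emit a MAP_PROCESS
 * packet followed by MAP_QUEUES packets for each of its active kernel
 * and user queues. If the runlist is oversubscribed, append a chained
 * RUN_LIST packet that points back at the start of this IB.
 */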
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, sizeof(struct pm4_mes_map_process),
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm_create_map_queue(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				sizeof(struct pm4_mes_map_queues),
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm_create_map_queue(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				sizeof(struct pm4_mes_map_queues),
				alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription)
		retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

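/*
 * Initialize the packet manager: create the HIQ kernel queue used to
 * submit scheduler packets to the CP.
 */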
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

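/* Tear down the packet manager and its HIQ kernel queue. */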
void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

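/*
 * Submit a SET_RESOURCES packet on the HIQ describing what the HW
 * scheduler may hand out: VMID mask, HW queue slot mask, GDS heap, and
 * the OAC and GWS masks.
 */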
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	/* NULL-initialized so the failure check below is well-defined even
	 * if acquire_packet_buffer leaves the pointer untouched on error.
	 */
	struct pm4_mes_set_resources *packet = NULL;
	int retval = 0;

	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					sizeof(*packet) / sizeof(uint32_t),
					(unsigned int **)&packet);
	if (!packet) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	memset(packet, 0, sizeof(struct pm4_mes_set_resources));
	packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_mes_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

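/*
 * Build the runlist IB for dqm_queues and submit a RUN_LIST packet on
 * the HIQ pointing the scheduler at it. On failure, roll back the HIQ
 * packet and release the IB.
 */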
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = sizeof(struct pm4_mes_runlist) / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

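/*
 * Submit a QUERY_STATUS packet that has the CP write fence_value to
 * fence_address once the preceding writes are acked, so the driver can
 * poll the fence to detect completion.
 */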
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	int retval;
	struct pm4_mes_query_status *packet;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->ops.acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_mes_query_status) / sizeof(uint32_t),
			(unsigned int **)&packet);
	if (retval)
		goto fail_acquire_packet_buffer;

	packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}

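/*
 * Submit an UNMAP_QUEUES packet that preempts (or, with reset, kills)
 * queues matching the given filter: a single queue by doorbell offset,
 * all queues of one PASID, all queues, or all dynamic (non-static)
 * queues.
 */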
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_preempt_type_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	int retval;
	uint32_t *buffer;
	struct pm4_mes_unmap_queues *packet;

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->ops.acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_mes_unmap_queues) / sizeof(uint32_t),
			&buffer);
	if (retval)
		goto err_acquire_packet_buffer;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
	pr_debug("static_queue: unmapping queues: mode is %d, reset is %d, type is %d\n",
		mode, reset, type);
	packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));
	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		WARN(1, "queue type %d", type);
		retval = -EINVAL;
		goto err_invalid;
	}

	if (reset)
		packet->bitfields2.action =
				action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
				action__mes_unmap_queues__preempt_queues;

	switch (mode) {
	case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d", mode);
		retval = -EINVAL;
		goto err_invalid;
	}

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);
	return 0;

err_invalid:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
err_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}

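/*
 * Free the runlist IB, if one is currently allocated, and clear the
 * allocated flag so a new runlist can be built.
 */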
void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}