// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 * EMU10K1 memory page allocation (PTB area)
 */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/export.h>

#include <sound/core.h>
#include <sound/emu10k1.h>
/* The page arguments of these two macros are Emu pages (4096 bytes),
 * not the kernel-aligned pages used elsewhere in this file.
 */
#define __set_ptb_entry(emu,page,addr) \
	(((__le32 *)(emu)->ptb_pages.area)[page] = \
	 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
#define __get_ptb_entry(emu, page) \
	(le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
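/*
 * Illustration with hypothetical numbers, just showing what the macro
 * computes: with address_mode == 0, mapping the page-aligned DMA address
 * 0x1234000 at PTB index 5 stores cpu_to_le32(0x1234005) -- the low bits
 * are free because the address is EMUPAGESIZE-aligned, so they carry the
 * page index.
 */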
#define UNIT_PAGES		(PAGE_SIZE / EMUPAGESIZE)
#define MAX_ALIGN_PAGES0	(MAXPAGES0 / UNIT_PAGES)
#define MAX_ALIGN_PAGES1	(MAXPAGES1 / UNIT_PAGES)
/* get the aligned page from an offset address */
#define get_aligned_page(offset)	((offset) >> PAGE_SHIFT)
/* get the offset address from an aligned page */
#define aligned_page_offset(page)	((page) << PAGE_SHIFT)
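/*
 * For example, with the usual 4 KiB kernel pages UNIT_PAGES is 1 and the
 * two page spaces coincide; with 16 KiB kernel pages each aligned page
 * covers four Emu pages, so a PTB update must fill four consecutive
 * entries (see set_ptb_entry() below).
 */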
#if PAGE_SIZE == EMUPAGESIZE && !IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
/* fill the PTB entry corresponding to page with addr */
#define set_ptb_entry(emu,page,addr)	__set_ptb_entry(emu,page,addr)
/* fill the PTB entry corresponding to page with the silence pointer */
#define set_silent_ptb(emu,page)	__set_ptb_entry(emu,page,emu->silent_page.addr)
#else
/* fill PTB entries -- we need to fill UNIT_PAGES entries */
static inline void set_ptb_entry(struct snd_emu10k1 *emu, int page, dma_addr_t addr)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		__set_ptb_entry(emu, page, addr);
		dev_dbg(emu->card->dev, "mapped page %d to entry %.8x\n", page,
			(unsigned int)__get_ptb_entry(emu, page));
		addr += EMUPAGESIZE;
	}
}

static inline void set_silent_ptb(struct snd_emu10k1 *emu, int page)
{
	int i;

	page *= UNIT_PAGES;
	for (i = 0; i < UNIT_PAGES; i++, page++) {
		/* do not increment addr; all entries point at the silent page */
		__set_ptb_entry(emu, page, emu->silent_page.addr);
		dev_dbg(emu->card->dev, "mapped silent page %d to entry %.8x\n",
			page, (unsigned int)__get_ptb_entry(emu, page));
	}
}
#endif /* PAGE_SIZE */
static int synth_alloc_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);
static int synth_free_pages(struct snd_emu10k1 *hw, struct snd_emu10k1_memblk *blk);

#define get_emu10k1_memblk(l,member)	list_entry(l, struct snd_emu10k1_memblk, member)
/* initialize the emu10k1-specific part of the memory block */
static void emu10k1_memblk_init(struct snd_emu10k1_memblk *blk)
{
	blk->mapped_page = -1;
	INIT_LIST_HEAD(&blk->mapped_link);
	INIT_LIST_HEAD(&blk->mapped_order_link);
	blk->map_locked = 0;

	blk->first_page = get_aligned_page(blk->mem.offset);
	blk->last_page = get_aligned_page(blk->mem.offset + blk->mem.size - 1);
	blk->pages = blk->last_page - blk->first_page + 1;
}
/*
 * search for an empty region on the PTB with the given size;
 * if an empty region is found, return its start page and store the
 * next mapped block in *nextp; if not found, return a negative error code
 */
static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{
	int page = 1, found_page = -ENOMEM;
	int max_size = npages;
	int size;
	struct list_head *candidate = &emu->mapped_link_head;
	struct list_head *pos;

	list_for_each (pos, &emu->mapped_link_head) {
		struct snd_emu10k1_memblk *blk = get_emu10k1_memblk(pos, mapped_link);
		if (blk->mapped_page < 0)
			continue;
		size = blk->mapped_page - page;
		if (size == npages) {
			*nextp = pos;
			return page;
		} else if (size > max_size) {
			/* we look for the maximum empty hole */
			max_size = size;
			candidate = pos;
			found_page = page;
		}
		page = blk->mapped_page + blk->pages;
	}
	size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
	if (size >= max_size) {
		*nextp = pos;
		return page;
	}
	*nextp = candidate;
	return found_page;
}
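/*
 * An illustration of the strategy above, with a hypothetical layout:
 * if blocks are mapped at aligned pages [1..4] and [10..12], the hole at
 * page 5 spans 5 pages.  A request for exactly 5 pages returns page 5
 * immediately; a request for 3 pages keeps scanning and lands in the
 * tail region after page 13, unless the tail is smaller than that hole.
 */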
/*
 * map a memory block onto emu10k1's PTB; call with memblk_lock held
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, pg;
	struct list_head *next;

	page = search_empty_map_area(emu, blk->pages, &next);
	if (page < 0) /* not found */
		return page;
	if (page == 0) {
		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
		return -EINVAL;
	}
	/* insert this block at the proper position in the mapped list */
	list_add_tail(&blk->mapped_link, next);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = page;
	/* fill the PTB */
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
		page++;
	}
	return 0;
}
/*
 * unmap the block and return the size of the resulting empty region
 *
 * call with memblk_lock held
 */
static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int start_page, end_page, mpage, pg;
	struct list_head *p;
	struct snd_emu10k1_memblk *q;

	/* calculate the expected size of the empty region */
	p = blk->mapped_link.prev;
	if (p != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		start_page = q->mapped_page + q->pages;
	} else {
		start_page = 1;
	}
	p = blk->mapped_link.next;
	if (p != &emu->mapped_link_head) {
		q = get_emu10k1_memblk(p, mapped_link);
		end_page = q->mapped_page;
	} else {
		end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
	}

	/* remove the links */
	list_del(&blk->mapped_link);
	list_del(&blk->mapped_order_link);
	/* clear the PTB */
	mpage = blk->mapped_page;
	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
		set_silent_ptb(emu, mpage);
		mpage++;
	}
	blk->mapped_page = -1;
	return end_page - start_page; /* return the new empty size */
}
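/*
 * For instance (hypothetical layout): unmapping a block that sits between
 * a neighbor ending at page 6 and one starting at page 12 reports an
 * empty region of 12 - 7 = 5 pages -- the freed pages plus any holes
 * that were already adjacent to the block.
 */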
/*
 * search for empty pages of the given size and create a memory block;
 * unlike synth_alloc, the memory block is aligned to the page start
 */
static struct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *blk;
	int page, psize;

	psize = get_aligned_page(size + PAGE_SIZE - 1);
	page = 0;
	list_for_each(p, &emu->memhdr->block) {
		blk = get_emu10k1_memblk(p, mem.list);
		if (page + psize <= blk->first_page)
			goto __found_pages;
		page = blk->last_page + 1;
	}
	if (page + psize > emu->max_cache_pages)
		return NULL;

__found_pages:
	/* create a new memory block */
	blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev);
	if (blk == NULL)
		return NULL;
	blk->mem.offset = aligned_page_offset(page); /* set the aligned offset */
	emu10k1_memblk_init(blk);
	return blk;
}
/* check that the given DMA address is valid for a page */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err_ratelimited(emu->card->dev,
			"max memory size is 0x%lx (addr = 0x%lx)!!\n",
			emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE - 1)) {
		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}
/*
 * map the given memory block onto the PTB.
 * if the block is already mapped, only update the link order.
 * if no empty pages are found, release unused memory blocks
 * and retry the mapping.
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* update the order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	err = map_memblk(emu, blk);
	if (err < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest one
		 */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next;
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue;
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* the empty region is now large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}

EXPORT_SYMBOL(snd_emu10k1_memblk_map);
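/*
 * Note on the eviction pass above: unmap_memblk() returns the size of the
 * combined hole around the block it freed, so the loop retries the
 * mapping only once a hole is provably large enough for blk, rather than
 * after every eviction.
 */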
/*
 * page allocation for DMA
 */
struct snd_util_memblk *
snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_util_memhdr *hdr;
	struct snd_emu10k1_memblk *blk;
	int page, err, idx;

	if (snd_BUG_ON(!emu))
		return NULL;
	if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
		       runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
		return NULL;
	hdr = emu->memhdr;
	if (snd_BUG_ON(!hdr))
		return NULL;

	idx = runtime->period_size >= runtime->buffer_size ?
		(emu->delay_pcm_irq * 2) : 0;
	mutex_lock(&hdr->block_mutex);
	blk = search_empty(emu, runtime->dma_bytes + idx);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	/* fill the buffer addresses; the page pointers are not stored,
	 * so that the pages are not freed in synth_free()
	 */
	idx = 0;
	for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
		unsigned long ofs = idx << PAGE_SHIFT;
		dma_addr_t addr;

		if (ofs >= runtime->dma_bytes)
			addr = emu->silent_page.addr;
		else
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		if (!is_valid_page(emu, addr)) {
			dev_err_ratelimited(emu->card->dev,
					    "emu: failure page = %d\n", idx);
			mutex_unlock(&hdr->block_mutex);
			return NULL;
		}
		emu->page_addr_table[page] = addr;
		emu->page_ptr_table[page] = NULL;
	}

	/* set the PTB entries */
	blk->map_locked = 1; /* do not unmap this block! */
	err = snd_emu10k1_memblk_map(emu, blk);
	if (err < 0) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}
/* release a DMA buffer from the page table */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!emu || !blk))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}
/*
 * allocate DMA pages, widening the allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c
 * for why this might be needed.
 *
 * If you modify this function, check whether __synth_free_pages() also
 * needs changes.
 */
int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
					struct snd_dma_buffer *dmab)
{
	if (emu->iommu_workaround) {
		size_t npages = DIV_ROUND_UP(size, PAGE_SIZE);
		size_t size_real = npages * PAGE_SIZE;

		/*
		 * The device has been observed to access up to 256 extra
		 * bytes, but use 1k to be safe.
		 */
		if (size_real < size + 1024)
			size += PAGE_SIZE;
	}

	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
				   &emu->pci->dev, size, dmab);
}
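/*
 * Worked example, assuming 4 KiB pages: a request for 4096 bytes rounds
 * to size_real = 4096, which is less than 4096 + 1024, so the allocation
 * is widened to 8192 bytes; a request for 3000 bytes already leaves
 * 1096 bytes of slack in its single page and is not widened.
 */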
/*
 * memory allocation using multiple pages (for synth);
 * unlike the DMA allocation above, non-contiguous pages are assigned
 */

/*
 * allocate a synth sample area
 */
struct snd_util_memblk *
snd_emu10k1_synth_alloc(struct snd_emu10k1 *hw, unsigned int size)
{
	struct snd_emu10k1_memblk *blk;
	struct snd_util_memhdr *hdr = hw->memhdr;

	mutex_lock(&hdr->block_mutex);
	blk = (struct snd_emu10k1_memblk *)__snd_util_mem_alloc(hdr, size);
	if (blk == NULL) {
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	if (synth_alloc_pages(hw, blk)) {
		__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
		mutex_unlock(&hdr->block_mutex);
		return NULL;
	}
	snd_emu10k1_memblk_map(hw, blk);
	mutex_unlock(&hdr->block_mutex);
	return (struct snd_util_memblk *)blk;
}

EXPORT_SYMBOL(snd_emu10k1_synth_alloc);
/*
 * free a synth sample area
 */
int
snd_emu10k1_synth_free(struct snd_emu10k1 *emu, struct snd_util_memblk *memblk)
{
	struct snd_util_memhdr *hdr = emu->memhdr;
	struct snd_emu10k1_memblk *blk = (struct snd_emu10k1_memblk *)memblk;
	unsigned long flags;

	mutex_lock(&hdr->block_mutex);
	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0)
		unmap_memblk(emu, blk);
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	synth_free_pages(emu, blk);
	__snd_util_mem_free(hdr, memblk);
	mutex_unlock(&hdr->block_mutex);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_free);
/* check the new allocation range; pages shared with neighboring blocks
 * are already allocated and must be excluded from the range
 */
static void get_single_page_range(struct snd_util_memhdr *hdr,
				  struct snd_emu10k1_memblk *blk,
				  int *first_page_ret, int *last_page_ret)
{
	struct list_head *p;
	struct snd_emu10k1_memblk *q;
	int first_page, last_page;

	first_page = blk->first_page;
	p = blk->mem.list.prev;
	if (p != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->last_page == first_page)
			first_page++; /* first page was already allocated */
	}
	last_page = blk->last_page;
	p = blk->mem.list.next;
	if (p != &hdr->block) {
		q = get_emu10k1_memblk(p, mem.list);
		if (q->first_page == last_page)
			last_page--; /* last page was already allocated */
	}
	*first_page_ret = first_page;
	*last_page_ret = last_page;
}
/* release the allocated pages */
static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
			       int last_page)
{
	struct snd_dma_buffer dmab;
	int page;

	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
	dmab.dev.dev = &emu->pci->dev;

	for (page = first_page; page <= last_page; page++) {
		if (emu->page_ptr_table[page] == NULL)
			continue;
		dmab.area = emu->page_ptr_table[page];
		dmab.addr = emu->page_addr_table[page];

		/*
		 * please keep me in sync with the logic in
		 * snd_emu10k1_alloc_pages_maybe_wider()
		 */
		dmab.bytes = PAGE_SIZE;
		if (emu->iommu_workaround)
			dmab.bytes *= 2;

		snd_dma_free_pages(&dmab);
		emu->page_addr_table[page] = 0;
		emu->page_ptr_table[page] = NULL;
	}
}
/*
 * allocate kernel pages
 */
static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int page, first_page, last_page;
	struct snd_dma_buffer dmab;

	emu10k1_memblk_init(blk);
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	/* allocate kernel pages */
	for (page = first_page; page <= last_page; page++) {
		if (snd_emu10k1_alloc_pages_maybe_wider(emu, PAGE_SIZE,
							&dmab) < 0)
			goto __fail;
		if (!is_valid_page(emu, dmab.addr)) {
			snd_dma_free_pages(&dmab);
			goto __fail;
		}
		emu->page_addr_table[page] = dmab.addr;
		emu->page_ptr_table[page] = dmab.area;
	}
	return 0;

__fail:
	/* release the already allocated pages */
	last_page = page - 1;
	__synth_free_pages(emu, first_page, last_page);
	return -ENOMEM;
}
/* free the pages of the block */
static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int first_page, last_page;
	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
	__synth_free_pages(emu, first_page, last_page);
	return 0;
}
/* calculate the buffer pointer from the offset address */
static inline void *offset_ptr(struct snd_emu10k1 *emu, int page, int offset)
{
	char *ptr;
	if (snd_BUG_ON(page < 0 || page >= emu->max_cache_pages))
		return NULL;
	ptr = emu->page_ptr_table[page];
	if (ptr == NULL) {
		dev_err(emu->card->dev,
			"access to NULL ptr: page = %d\n", page);
		return NULL;
	}
	ptr += offset & (PAGE_SIZE - 1);
	return (void *)ptr;
}
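/*
 * E.g. (hypothetical numbers, 4 KiB pages): offset 0x2345 within a block
 * selects page 2 of that block and byte 0x345 within the corresponding
 * kernel page.
 */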
/*
 * bzero(blk + offset, size)
 */
int snd_emu10k1_synth_bzero(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
			    int offset, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr)
			memset(ptr, 0, temp);
		offset = nextofs;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_bzero);
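/*
 * Both the loop above and the copy below walk the region one page at a
 * time, clamping each chunk to whichever comes first: the next page
 * boundary or the end of the region.  E.g. zeroing 0x2000 bytes starting
 * at in-page offset 0x800 (hypothetical numbers, 4 KiB pages) is split
 * into chunks of 0x800, 0x1000 and 0x800 bytes across three pages.
 */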
/*
 * copy_from_user(blk + offset, data, size)
 */
int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_memblk *blk,
				     int offset, const char __user *data, int size)
{
	int page, nextofs, end_offset, temp, temp1;
	void *ptr;
	struct snd_emu10k1_memblk *p = (struct snd_emu10k1_memblk *)blk;

	offset += blk->offset & (PAGE_SIZE - 1);
	end_offset = offset + size;
	page = get_aligned_page(offset);
	do {
		nextofs = aligned_page_offset(page + 1);
		temp = nextofs - offset;
		temp1 = end_offset - offset;
		if (temp1 < temp)
			temp = temp1;
		ptr = offset_ptr(emu, page + p->first_page, offset);
		if (ptr && copy_from_user(ptr, data, temp))
			return -EFAULT;
		offset = nextofs;
		data += temp;
		page++;
	} while (offset < end_offset);
	return 0;
}

EXPORT_SYMBOL(snd_emu10k1_synth_copy_from_user);